From 0e06bd21d82203038b9a7d96a6d7b199e2d573a5 Mon Sep 17 00:00:00 2001
From: martynwei
Date: Tue, 26 Oct 2021 23:31:12 -0400
Subject: [PATCH 001/244] Add vits files

Add vits_losses.py, vits_modules.py and vits.py.
---
 nemo/collections/tts/losses/vits_losses.py   | 135 ++++
 nemo/collections/tts/models/vits.py          | 665 +++++++++++++++++++
 nemo/collections/tts/modules/vits_modules.py | 391 +++++++++++
 3 files changed, 1191 insertions(+)
 create mode 100644 nemo/collections/tts/losses/vits_losses.py
 create mode 100644 nemo/collections/tts/models/vits.py
 create mode 100644 nemo/collections/tts/modules/vits_modules.py

diff --git a/nemo/collections/tts/losses/vits_losses.py b/nemo/collections/tts/losses/vits_losses.py
new file mode 100644
index 000000000000..330cbbe44c2b
--- /dev/null
+++ b/nemo/collections/tts/losses/vits_losses.py
@@ -0,0 +1,135 @@
+import torch
+from torch.nn import functional as F
+
+from nemo.core.classes import Loss, typecheck
+from nemo.core.neural_types.elements import LossType, VoidType
+from nemo.core.neural_types.neural_type import NeuralType
+
+
+class FeatureLoss(Loss):
+    """Feature-matching loss: L1 distance between real and generated discriminator feature maps."""
+
+    @property
+    def input_types(self):
+        return {
+            "fmap_r": [[NeuralType(elements_type=VoidType())]],
+            "fmap_g": [[NeuralType(elements_type=VoidType())]],
+        }
+
+    @property
+    def output_types(self):
+        return {
+            "loss": NeuralType(elements_type=LossType()),
+        }
+
+    @typecheck()
+    def forward(self, fmap_r, fmap_g):
+        loss = 0
+        for dr, dg in zip(fmap_r, fmap_g):
+            for rl, gl in zip(dr, dg):
+                rl = rl.float().detach()
+                gl = gl.float()
+                loss += torch.mean(torch.abs(rl - gl))
+
+        return loss * 2
+
+
+class DiscriminatorLoss(Loss):
+    """Least-squares GAN loss for the discriminator."""
+
+    @property
+    def input_types(self):
+        return {
+            "disc_real_outputs": [NeuralType(('B', 'T'), VoidType())],
+            "disc_generated_outputs": [NeuralType(('B', 'T'), VoidType())],
+        }
+
+    @property
+    def output_types(self):
+        return {
+            "loss": NeuralType(elements_type=LossType()),
+            "real_losses": [NeuralType(elements_type=LossType())],
+            "fake_losses": [NeuralType(elements_type=LossType())],
+        }
+
+    @typecheck()
+    def forward(self, disc_real_outputs, disc_generated_outputs):
+        loss = 0
+        r_losses = []
+        g_losses = []
+        for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
+            dr = dr.float()
+            dg = dg.float()
+            r_loss = torch.mean((1-dr)**2)
+            g_loss = torch.mean(dg**2)
+            loss += (r_loss + g_loss)
+            r_losses.append(r_loss.item())
+            g_losses.append(g_loss.item())
+
+        return loss, r_losses, g_losses
+
+
+class GeneratorLoss(Loss):
+    """Generator Loss module"""
+
+    @property
+    def input_types(self):
+        return {
+            "disc_outputs": [NeuralType(('B', 'T'), VoidType())],
+        }
+
+    @property
+    def output_types(self):
+        return {
+            "loss": NeuralType(elements_type=LossType()),
+            "fake_losses": [NeuralType(elements_type=LossType())],
+        }
+
+    @typecheck()
+    def forward(self, disc_outputs):
+        loss = 0
+        gen_losses = []
+        for dg in disc_outputs:
+            l = torch.mean((1 - dg) ** 2)
+            gen_losses.append(l)
+            loss += l
+
+        return loss, gen_losses
+
+
+class KlLoss(Loss):
+    """KL divergence between the posterior and the flow-mapped prior."""
+
+    @property
+    def input_types(self):
+        return {
+            "z_p": NeuralType(('B', 'D', 'T'), VoidType()),
+            "logs_q": NeuralType(('B', 'D', 'T'), VoidType()),
+            "m_p": NeuralType(('B', 'D', 'T'), VoidType()),
+            "logs_p": NeuralType(('B', 'D', 'T'), VoidType()),
+            "z_mask": NeuralType(('B', 'D', 'T'), VoidType()),
+        }
+
+    @property
+    def output_types(self):
+        return {
+            "loss": NeuralType(elements_type=LossType()),
+        }
+
+    @typecheck()
+    def forward(self, z_p, logs_q, m_p, logs_p, z_mask):
+        """
+        z_p, logs_q: [b, h, t_t]
+        m_p, logs_p: [b, h, t_t]
+        """
+        z_p = z_p.float()
+        logs_q = logs_q.float()
+ m_p = m_p.float() + logs_p = logs_p.float() + z_mask = z_mask.float() + + kl = logs_p - logs_q - 0.5 + kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p) + kl = torch.sum(kl * z_mask) + l = kl / torch.sum(z_mask) + return l diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py new file mode 100644 index 000000000000..7127541412b9 --- /dev/null +++ b/nemo/collections/tts/models/vits.py @@ -0,0 +1,665 @@ +from dataclasses import dataclass +from itertools import chain +from typing import Any, Dict + +import numpy as np +import torch +from hydra.utils import instantiate +from omegaconf import MISSING, DictConfig, OmegaConf +from pytorch_lightning import Trainer +from pytorch_lightning.loggers import LoggerCollection, TensorBoardLogger +import math +import torch +from torch import nn +from torch.nn import functional as F + +import commons +import modules +import attentions +import monotonic_align + +from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d +from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm +from commons import init_weights, get_padding + +from nemo.collections.asr.data.audio_to_text import FastPitchDataset +from nemo.collections.common.parts.preprocessing import parsers +from nemo.collections.tts.helpers.helpers import plot_spectrogram_to_numpy, regulate_len +from nemo.collections.tts.models.base import TextToWaveform +from nemo.core.classes.common import PretrainedModelInfo, typecheck +from nemo.core.neural_types.elements import ( + MelSpectrogramType, + RegressionValuesType, + TokenDurationType, + TokenIndex, + TokenLogDurationType, +) +from nemo.core.neural_types.neural_type import NeuralType +from nemo.core.optim.lr_scheduler import NoamAnnealing +from nemo.utils import logging +from nemo.collections.tts.models.base import TextToWaveform +from nemo.collections.tts.losses.vits_losses import DiscriminatorLoss, FeatureLoss, GeneratorLoss, KlLoss +#from nemo.collections.tts.modules.vits_modules import MultiPeriodDiscriminator, MultiScaleDiscriminator +import nemo.collections.tts.modules.vits_modules as modules + + +class StochasticDurationPredictor(nn.Module): + def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): + super().__init__() + filter_channels = in_channels # it needs to be removed from future version. 
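+        # NOTE: the override above ties the flow width to in_channels; the original
+        # VITS release appears to keep it for checkpoint compatibility, which is
+        # presumably why it is only flagged for removal rather than dropped here.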
+ self.in_channels = in_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.n_flows = n_flows + self.gin_channels = gin_channels + + self.log_flow = modules.Log() + self.flows = nn.ModuleList() + self.flows.append(modules.ElementwiseAffine(2)) + for i in range(n_flows): + self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) + self.flows.append(modules.Flip()) + + self.post_pre = nn.Conv1d(1, filter_channels, 1) + self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) + self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) + self.post_flows = nn.ModuleList() + self.post_flows.append(modules.ElementwiseAffine(2)) + for i in range(4): + self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) + self.post_flows.append(modules.Flip()) + + self.pre = nn.Conv1d(in_channels, filter_channels, 1) + self.proj = nn.Conv1d(filter_channels, filter_channels, 1) + self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, filter_channels, 1) + + def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): + x = torch.detach(x) + x = self.pre(x) + if g is not None: + g = torch.detach(g) + x = x + self.cond(g) + x = self.convs(x, x_mask) + x = self.proj(x) * x_mask + + if not reverse: + flows = self.flows + assert w is not None + + logdet_tot_q = 0 + h_w = self.post_pre(w) + h_w = self.post_convs(h_w, x_mask) + h_w = self.post_proj(h_w) * x_mask + e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask + z_q = e_q + for flow in self.post_flows: + z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) + logdet_tot_q += logdet_q + z_u, z1 = torch.split(z_q, [1, 1], 1) + u = torch.sigmoid(z_u) * x_mask + z0 = (w - u) * x_mask + logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2]) + logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q + + logdet_tot = 0 + z0, logdet = self.log_flow(z0, x_mask) + logdet_tot += logdet + z = torch.cat([z0, z1], 1) + for flow in flows: + z, logdet = flow(z, x_mask, g=x, reverse=reverse) + logdet_tot = logdet_tot + logdet + nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot + return nll + logq # [b] + else: + flows = list(reversed(self.flows)) + flows = flows[:-2] + [flows[-1]] # remove a useless vflow + z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale + for flow in flows: + z = flow(z, x_mask, g=x, reverse=reverse) + z0, z1 = torch.split(z, [1, 1], 1) + logw = z0 + return logw + + +class DurationPredictor(nn.Module): + def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): + super().__init__() + + self.in_channels = in_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.gin_channels = gin_channels + + self.drop = nn.Dropout(p_dropout) + self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) + self.norm_1 = modules.LayerNorm(filter_channels) + self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) + self.norm_2 = modules.LayerNorm(filter_channels) + self.proj = nn.Conv1d(filter_channels, 1, 1) + + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, 
in_channels, 1) + + def forward(self, x, x_mask, g=None): + x = torch.detach(x) + if g is not None: + g = torch.detach(g) + x = x + self.cond(g) + x = self.conv_1(x * x_mask) + x = torch.relu(x) + x = self.norm_1(x) + x = self.drop(x) + x = self.conv_2(x * x_mask) + x = torch.relu(x) + x = self.norm_2(x) + x = self.drop(x) + x = self.proj(x * x_mask) + return x * x_mask + + +class TextEncoder(nn.Module): + def __init__(self, + n_vocab, + out_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout): + super().__init__() + self.n_vocab = n_vocab + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + + self.emb = nn.Embedding(n_vocab, hidden_channels) + nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) + + self.encoder = attentions.Encoder( + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout) + self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, x, x_lengths): + x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] + x = torch.transpose(x, 1, -1) # [b, h, t] + x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) + + x = self.encoder(x * x_mask, x_mask) + stats = self.proj(x) * x_mask + + m, logs = torch.split(stats, self.out_channels, dim=1) + return x, m, logs, x_mask + + +class ResidualCouplingBlock(nn.Module): + def __init__(self, + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + n_flows=4, + gin_channels=0): + super().__init__() + self.channels = channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.n_flows = n_flows + self.gin_channels = gin_channels + + self.flows = nn.ModuleList() + for i in range(n_flows): + self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) + self.flows.append(modules.Flip()) + + def forward(self, x, x_mask, g=None, reverse=False): + if not reverse: + for flow in self.flows: + x, _ = flow(x, x_mask, g=g, reverse=reverse) + else: + for flow in reversed(self.flows): + x = flow(x, x_mask, g=g, reverse=reverse) + return x + + +class PosteriorEncoder(nn.Module): + def __init__(self, + in_channels, + out_channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=0): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.gin_channels = gin_channels + + self.pre = nn.Conv1d(in_channels, hidden_channels, 1) + self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, x, x_lengths, g=None): + x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) + x = self.pre(x) * x_mask + x = self.enc(x, x_mask, g=g) + stats = self.proj(x) * x_mask + m, logs = torch.split(stats, self.out_channels, dim=1) + z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask + return z, m, logs, x_mask + + +class Generator(torch.nn.Module): + def __init__(self, initial_channel, 
resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): + super(Generator, self).__init__() + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) + resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 + + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + self.ups.append(weight_norm( + ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), + k, u, padding=(k-u)//2))) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = upsample_initial_channel//(2**(i+1)) + for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): + self.resblocks.append(resblock(ch, k, d)) + + self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) + self.ups.apply(init_weights) + + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) + + def forward(self, x, g=None): + x = self.conv_pre(x) + if g is not None: + x = x + self.cond(g) + + for i in range(self.num_upsamples): + x = F.leaky_relu(x, modules.LRELU_SLOPE) + x = self.ups[i](x) + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i*self.num_kernels+j](x) + else: + xs += self.resblocks[i*self.num_kernels+j](x) + x = xs / self.num_kernels + x = F.leaky_relu(x) + x = self.conv_post(x) + x = torch.tanh(x) + + return x + + def remove_weight_norm(self): + print('Removing weight norm...') + for l in self.ups: + remove_weight_norm(l) + for l in self.resblocks: + l.remove_weight_norm() + + +class DiscriminatorP(torch.nn.Module): + def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): + super(DiscriminatorP, self).__init__() + self.period = period + self.use_spectral_norm = use_spectral_norm + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList([ + norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), + ]) + self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) + + def forward(self, x): + fmap = [] + + # 1d to 2d + b, c, t = x.shape + if t % self.period != 0: # pad first + n_pad = self.period - (t % self.period) + x = F.pad(x, (0, n_pad), "reflect") + t = t + n_pad + x = x.view(b, c, t // self.period, self.period) + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, modules.LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class DiscriminatorS(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(DiscriminatorS, self).__init__() + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList([ + norm_f(Conv1d(1, 16, 15, 1, padding=7)), + norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), + norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), + norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), + 
norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), + norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), + ]) + self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) + + def forward(self, x): + fmap = [] + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, modules.LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class MultiPeriodDiscriminator(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(MultiPeriodDiscriminator, self).__init__() + periods = [2,3,5,7,11] + + discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] + discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] + self.discriminators = nn.ModuleList(discs) + + def forward(self, y, y_hat): + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + y_d_rs.append(y_d_r) + y_d_gs.append(y_d_g) + fmap_rs.append(fmap_r) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + + +class SynthesizerTrn(nn.Module): + """ + Synthesizer for Training + """ + + def __init__(self, + n_vocab, + spec_channels, + segment_size, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + n_speakers=0, + gin_channels=0, + use_sdp=True, + **kwargs): + + super().__init__() + self.n_vocab = n_vocab + self.spec_channels = spec_channels + self.inter_channels = inter_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.resblock = resblock + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.upsample_rates = upsample_rates + self.upsample_initial_channel = upsample_initial_channel + self.upsample_kernel_sizes = upsample_kernel_sizes + self.segment_size = segment_size + self.n_speakers = n_speakers + self.gin_channels = gin_channels + + self.use_sdp = use_sdp + + self.enc_p = TextEncoder(n_vocab, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout) + self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) + self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) + self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) + + if use_sdp: + self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) + else: + self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) + + if n_speakers > 1: + self.emb_g = nn.Embedding(n_speakers, gin_channels) + + def forward(self, x, x_lengths, y, y_lengths, sid=None): + + x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) + if self.n_speakers > 0: + g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] + else: + g = None + + z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) + z_p = self.flow(z, y_mask, g=g) + + with torch.no_grad(): + # negative cross-entropy + s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] + neg_cent1 = 
torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] + neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] + neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] + neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] + neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 + + attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) + attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() + + w = attn.sum(2) + if self.use_sdp: + l_length = self.dp(x, x_mask, w, g=g) + l_length = l_length / torch.sum(x_mask) + else: + logw_ = torch.log(w + 1e-6) * x_mask + logw = self.dp(x, x_mask, g=g) + l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging + + # expand prior + m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) + logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) + + z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) + o = self.dec(z_slice, g=g) + return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) + + def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): + x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) + if self.n_speakers > 0: + g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] + else: + g = None + + if self.use_sdp: + logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) + else: + logw = self.dp(x, x_mask, g=g) + w = torch.exp(logw) * x_mask * length_scale + w_ceil = torch.ceil(w) + y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() + y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) + attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) + attn = commons.generate_path(w_ceil, attn_mask) + + m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] + logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] + + z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale + z = self.flow(z_p, y_mask, g=g, reverse=True) + o = self.dec((z * y_mask)[:,:,:max_len], g=g) + return o, attn, y_mask, (z, z_p, m_p, logs_p) + + def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): + assert self.n_speakers > 0, "n_speakers have to be larger than 0." 
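+        # Voice conversion path: encode the source audio into the posterior latent
+        # conditioned on the source speaker, push it through the flow into the
+        # speaker-independent prior space, then run the flow in reverse conditioned
+        # on the target speaker and vocode the result.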
+ g_src = self.emb_g(sid_src).unsqueeze(-1) + g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) + z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) + z_p = self.flow(z, y_mask, g=g_src) + z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) + o_hat = self.dec(z_hat * y_mask, g=g_tgt) + return o_hat, y_mask, (z, z_p, z_hat) + + +@dataclass +class VitsConfig: + parser: Dict[Any, Any] = MISSING + preprocessor: Dict[Any, Any] = MISSING + input_fft: Dict[Any, Any] = MISSING + output_fft: Dict[Any, Any] = MISSING + duration_predictor: Dict[Any, Any] = MISSING + pitch_predictor: Dict[Any, Any] = MISSING + + +class Vits(TextToWaveform): + def __init__(self, cfg: DictConfig, trainer: Trainer = None): + if isinstance(cfg, dict): + cfg = OmegaConf.create(cfg) + + self._parser = parsers.make_parser( + labels=cfg.labels, + name='en', + unk_id=-1, + blank_id=-1, + do_normalize=True, + abbreviation_version="fastpitch", + make_table=False, + ) + + super().__init__(cfg=cfg, trainer=trainer) + + schema = OmegaConf.structured(VitsConfig) + # ModelPT ensures that cfg is a DictConfig, but do this second check in case ModelPT changes + if isinstance(cfg, dict): + cfg = OmegaConf.create(cfg) + elif not isinstance(cfg, DictConfig): + raise ValueError(f"cfg was type: {type(cfg)}. Expected either a dict or a DictConfig") + # Ensure passed cfg is compliant with schema + OmegaConf.merge(cfg, schema) + + self.preprocessor = instantiate(cfg.preprocessor) + self.melspec_fn = instantiate(cfg.preprocessor, highfreq=None, use_grads=True) + + self.encoder = instantiate(cfg.input_fft) + self.duration_predictor = instantiate(cfg.duration_predictor) + self.pitch_predictor = instantiate(cfg.pitch_predictor) + + self.generator = instantiate(cfg.generator) + self.multiperioddisc = MultiPeriodDiscriminator() + self.feat_matching_loss = FeatureLoss() + self.disc_loss = DiscriminatorLoss() + self.gen_loss = GeneratorLoss() + + self.max_token_duration = cfg.max_token_duration + + self.pitch_emb = torch.nn.Conv1d( + 1, + cfg.symbols_embedding_dim, + kernel_size=cfg.pitch_embedding_kernel_size, + padding=int((cfg.pitch_embedding_kernel_size - 1) / 2), + ) + + # Store values precomputed from training data for convenience + self.register_buffer('pitch_mean', torch.zeros(1)) + self.register_buffer('pitch_std', torch.zeros(1)) + + self.mel_loss_coeff = cfg.mel_loss_coeff + + self.log_train_images = False + self.logged_real_samples = False + self._tb_logger = None + self.hann_window = None + self.splice_length = cfg.splice_length + self.sample_rate = cfg.sample_rate + self.hop_size = cfg.hop_size + + def parse(self, str_input: str) -> torch.tensor: + # TODO: Implement + pass + + def configure_optimizers(self): + # TODO: Implement + pass + + def setup_training_data(self, cfg): + # TODO: Call data preprocessing + pass + + def setup_validation_data(self, cfg): + # TODO: Call data preprocessing + pass + + def setup_test_data(self, cfg): + """Omitted.""" + pass + + def list_available_models(cls) -> 'List[PretrainedModelInfo]': + list_of_models = [] + # TODO: List available models?? 
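+        # NeMo models typically populate this with PretrainedModelInfo entries
+        # pointing at NGC-hosted checkpoints; no VITS checkpoints are registered yet.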
+        return list_of_models
+
+    def convert_text_to_waveform(self, *, tokens):
+        # TODO: Convert text to waveforms
+        pass
+

diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py
new file mode 100644
index 000000000000..c92bbe03c687
--- /dev/null
+++ b/nemo/collections/tts/modules/vits_modules.py
@@ -0,0 +1,391 @@
+import copy
+import math
+import numpy as np
+import scipy
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
+from torch.nn.utils import weight_norm, remove_weight_norm
+
+import commons
+from commons import init_weights, get_padding
+from transforms import piecewise_rational_quadratic_transform
+
+LRELU_SLOPE = 0.1
+
+
+class LayerNorm(nn.Module):
+    def __init__(self, channels, eps=1e-5):
+        super().__init__()
+        self.channels = channels
+        self.eps = eps
+
+        self.gamma = nn.Parameter(torch.ones(channels))
+        self.beta = nn.Parameter(torch.zeros(channels))
+
+    def forward(self, x):
+        x = x.transpose(1, -1)
+        x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
+        return x.transpose(1, -1)
+
+
+class ConvReluNorm(nn.Module):
+    def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
+        super().__init__()
+        self.in_channels = in_channels
+        self.hidden_channels = hidden_channels
+        self.out_channels = out_channels
+        self.kernel_size = kernel_size
+        self.n_layers = n_layers
+        self.p_dropout = p_dropout
+        assert n_layers > 1, "Number of layers should be larger than 1."
+
+        self.conv_layers = nn.ModuleList()
+        self.norm_layers = nn.ModuleList()
+        self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
+        self.norm_layers.append(LayerNorm(hidden_channels))
+        self.relu_drop = nn.Sequential(
+            nn.ReLU(),
+            nn.Dropout(p_dropout))
+        for _ in range(n_layers - 1):
+            self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
+            self.norm_layers.append(LayerNorm(hidden_channels))
+        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
+        self.proj.weight.data.zero_()
+        self.proj.bias.data.zero_()
+
+    def forward(self, x, x_mask):
+        x_org = x
+        for i in range(self.n_layers):
+            x = self.conv_layers[i](x * x_mask)
+            x = self.norm_layers[i](x)
+            x = self.relu_drop(x)
+        x = x_org + self.proj(x)
+        return x * x_mask
+
+
+class DDSConv(nn.Module):
+    """
+    Dilated and Depth-Separable Convolution
+    """
+
+    def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
+        super().__init__()
+        self.channels = channels
+        self.kernel_size = kernel_size
+        self.n_layers = n_layers
+        self.p_dropout = p_dropout
+
+        self.drop = nn.Dropout(p_dropout)
+        self.convs_sep = nn.ModuleList()
+        self.convs_1x1 = nn.ModuleList()
+        self.norms_1 = nn.ModuleList()
+        self.norms_2 = nn.ModuleList()
+        for i in range(n_layers):
+            dilation = kernel_size ** i
+            padding = (kernel_size * dilation - dilation) // 2
+            self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
+                                            groups=channels, dilation=dilation, padding=padding
+                                            ))
+            self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
+            self.norms_1.append(LayerNorm(channels))
+            self.norms_2.append(LayerNorm(channels))
+
+    def forward(self, x, x_mask, g=None):
+        if g is not None:
+            x = x + g
+        for i in range(self.n_layers):
+            y = self.convs_sep[i](x * x_mask)
+            y = self.norms_1[i](y)
+            y = F.gelu(y)
+            y = self.convs_1x1[i](y)
+            y = self.norms_2[i](y)
+            y = F.gelu(y)
+            y = self.drop(y)
+            x = 
x + y
+        return x * x_mask
+
+
+class WN(torch.nn.Module):
+    def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
+        super(WN, self).__init__()
+        assert (kernel_size % 2 == 1)
+        self.hidden_channels = hidden_channels
+        self.kernel_size = kernel_size
+        self.dilation_rate = dilation_rate
+        self.n_layers = n_layers
+        self.gin_channels = gin_channels
+        self.p_dropout = p_dropout
+
+        self.in_layers = torch.nn.ModuleList()
+        self.res_skip_layers = torch.nn.ModuleList()
+        self.drop = nn.Dropout(p_dropout)
+
+        if gin_channels != 0:
+            cond_layer = torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1)
+            self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
+
+        for i in range(n_layers):
+            dilation = dilation_rate ** i
+            padding = int((kernel_size * dilation - dilation) / 2)
+            in_layer = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, kernel_size,
+                                       dilation=dilation, padding=padding)
+            in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
+            self.in_layers.append(in_layer)
+
+            # last one is not necessary
+            if i < n_layers - 1:
+                res_skip_channels = 2 * hidden_channels
+            else:
+                res_skip_channels = hidden_channels
+
+            res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
+            res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
+            self.res_skip_layers.append(res_skip_layer)
+
+    def forward(self, x, x_mask, g=None, **kwargs):
+        output = torch.zeros_like(x)
+        n_channels_tensor = torch.IntTensor([self.hidden_channels])
+
+        if g is not None:
+            g = self.cond_layer(g)
+
+        for i in range(self.n_layers):
+            x_in = self.in_layers[i](x)
+            if g is not None:
+                cond_offset = i * 2 * self.hidden_channels
+                g_l = g[:, cond_offset:cond_offset + 2 * self.hidden_channels, :]
+            else:
+                g_l = torch.zeros_like(x_in)
+
+            acts = commons.fused_add_tanh_sigmoid_multiply(
+                x_in,
+                g_l,
+                n_channels_tensor)
+            acts = self.drop(acts)
+
+            res_skip_acts = self.res_skip_layers[i](acts)
+            if i < self.n_layers - 1:
+                res_acts = res_skip_acts[:, :self.hidden_channels, :]
+                x = (x + res_acts) * x_mask
+                output = output + res_skip_acts[:, self.hidden_channels:, :]
+            else:
+                output = output + res_skip_acts
+        return output * x_mask
+
+    def remove_weight_norm(self):
+        if self.gin_channels != 0:
+            torch.nn.utils.remove_weight_norm(self.cond_layer)
+        for l in self.in_layers:
+            torch.nn.utils.remove_weight_norm(l)
+        for l in self.res_skip_layers:
+            torch.nn.utils.remove_weight_norm(l)
+
+
+class ResBlock1(torch.nn.Module):
+    def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
+        super(ResBlock1, self).__init__()
+        self.convs1 = nn.ModuleList([
+            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
+                               padding=get_padding(kernel_size, dilation[0]))),
+            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
+                               padding=get_padding(kernel_size, dilation[1]))),
+            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
+                               padding=get_padding(kernel_size, dilation[2])))
+        ])
+        self.convs1.apply(init_weights)
+
+        self.convs2 = nn.ModuleList([
+            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+                               padding=get_padding(kernel_size, 1))),
+            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+                               padding=get_padding(kernel_size, 1))),
+            weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
+                               padding=get_padding(kernel_size, 1)))
+        ])
+        self.convs2.apply(init_weights)
+
+    def forward(self, x, x_mask=None):
+        for c1, 
c2 in zip(self.convs1, self.convs2): + xt = F.leaky_relu(x, LRELU_SLOPE) + if x_mask is not None: + xt = xt * x_mask + xt = c1(xt) + xt = F.leaky_relu(xt, LRELU_SLOPE) + if x_mask is not None: + xt = xt * x_mask + xt = c2(xt) + x = xt + x + if x_mask is not None: + x = x * x_mask + return x + + def remove_weight_norm(self): + for l in self.convs1: + remove_weight_norm(l) + for l in self.convs2: + remove_weight_norm(l) + + +class ResBlock2(torch.nn.Module): + def __init__(self, channels, kernel_size=3, dilation=(1, 3)): + super(ResBlock2, self).__init__() + self.convs = nn.ModuleList([ + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]))) + ]) + self.convs.apply(init_weights) + + def forward(self, x, x_mask=None): + for c in self.convs: + xt = F.leaky_relu(x, LRELU_SLOPE) + if x_mask is not None: + xt = xt * x_mask + xt = c(xt) + x = xt + x + if x_mask is not None: + x = x * x_mask + return x + + def remove_weight_norm(self): + for l in self.convs: + remove_weight_norm(l) + + +class Log(nn.Module): + def forward(self, x, x_mask, reverse=False, **kwargs): + if not reverse: + y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask + logdet = torch.sum(-y, [1, 2]) + return y, logdet + else: + x = torch.exp(x) * x_mask + return x + + +class Flip(nn.Module): + def forward(self, x, *args, reverse=False, **kwargs): + x = torch.flip(x, [1]) + if not reverse: + logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) + return x, logdet + else: + return x + + +class ElementwiseAffine(nn.Module): + def __init__(self, channels): + super().__init__() + self.channels = channels + self.m = nn.Parameter(torch.zeros(channels, 1)) + self.logs = nn.Parameter(torch.zeros(channels, 1)) + + def forward(self, x, x_mask, reverse=False, **kwargs): + if not reverse: + y = self.m + torch.exp(self.logs) * x + y = y * x_mask + logdet = torch.sum(self.logs * x_mask, [1, 2]) + return y, logdet + else: + x = (x - self.m) * torch.exp(-self.logs) * x_mask + return x + + +class ResidualCouplingLayer(nn.Module): + def __init__(self, + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + p_dropout=0, + gin_channels=0, + mean_only=False): + assert channels % 2 == 0, "channels should be divisible by 2" + super().__init__() + self.channels = channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.half_channels = channels // 2 + self.mean_only = mean_only + + self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) + self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, + gin_channels=gin_channels) + self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) + self.post.weight.data.zero_() + self.post.bias.data.zero_() + + def forward(self, x, x_mask, g=None, reverse=False): + x0, x1 = torch.split(x, [self.half_channels] * 2, 1) + h = self.pre(x0) * x_mask + h = self.enc(h, x_mask, g=g) + stats = self.post(h) * x_mask + if not self.mean_only: + m, logs = torch.split(stats, [self.half_channels] * 2, 1) + else: + m = stats + logs = torch.zeros_like(m) + + if not reverse: + x1 = m + x1 * torch.exp(logs) * x_mask + x = torch.cat([x0, x1], 1) + logdet = torch.sum(logs, [1, 2]) + return x, logdet + else: + x1 = (x1 - m) * torch.exp(-logs) * x_mask + x = 
torch.cat([x0, x1], 1) + return x + + +class ConvFlow(nn.Module): + def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): + super().__init__() + self.in_channels = in_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.n_layers = n_layers + self.num_bins = num_bins + self.tail_bound = tail_bound + self.half_channels = in_channels // 2 + + self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) + self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) + self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) + self.proj.weight.data.zero_() + self.proj.bias.data.zero_() + + def forward(self, x, x_mask, g=None, reverse=False): + x0, x1 = torch.split(x, [self.half_channels] * 2, 1) + h = self.pre(x0) + h = self.convs(h, x_mask, g=g) + h = self.proj(h) * x_mask + + b, c, t = x0.shape + h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] + + unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) + unnormalized_heights = h[..., self.num_bins:2 * self.num_bins] / math.sqrt(self.filter_channels) + unnormalized_derivatives = h[..., 2 * self.num_bins:] + + x1, logabsdet = piecewise_rational_quadratic_transform(x1, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=reverse, + tails='linear', + tail_bound=self.tail_bound + ) + + x = torch.cat([x0, x1], 1) * x_mask + logdet = torch.sum(logabsdet * x_mask, [1, 2]) + if not reverse: + return x, logdet + else: + return x From 07644acb0b869d1a487239b13c1e61db3c003b0b Mon Sep 17 00:00:00 2001 From: martynwei Date: Tue, 2 Nov 2021 23:24:41 -0400 Subject: [PATCH 002/244] Move non-vits models to modules --- nemo/collections/tts/models/vits.py | 526 +----------------- nemo/collections/tts/modules/vits_modules.py | 555 +++++++++++++++++++ 2 files changed, 557 insertions(+), 524 deletions(-) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 7127541412b9..9805bbebd3bf 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -39,529 +39,9 @@ from nemo.utils import logging from nemo.collections.tts.models.base import TextToWaveform from nemo.collections.tts.losses.vits_losses import DiscriminatorLoss, FeatureLoss, GeneratorLoss, KlLoss -#from nemo.collections.tts.modules.vits_modules import MultiPeriodDiscriminator, MultiScaleDiscriminator import nemo.collections.tts.modules.vits_modules as modules -class StochasticDurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): - super().__init__() - filter_channels = in_channels # it needs to be removed from future version. 
- self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2]) - logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - for flow in flows: - z = flow(z, x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class DurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_2 = modules.LayerNorm(filter_channels) - self.proj = nn.Conv1d(filter_channels, 1, 1) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, 
in_channels, 1) - - def forward(self, x, x_mask, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - x = self.proj(x * x_mask) - return x * x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - - self.emb = nn.Embedding(n_vocab, hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths): - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, 
resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), - k, u, padding=(k-u)//2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel//(2**(i+1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i*self.num_kernels+j](x) - else: - xs += self.resblocks[i*self.num_kernels+j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - 
norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - - self.use_sdp = use_sdp - - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - if use_sdp: - self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - else: - self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) - - if n_speakers > 1: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - def forward(self, x, x_lengths, y, y_lengths, sid=None): - - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - - with torch.no_grad(): - # negative cross-entropy - s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] - neg_cent1 = 
torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] - neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] - neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() - - w = attn.sum(2) - if self.use_sdp: - l_length = self.dp(x, x_mask, w, g=g) - l_length = l_length / torch.sum(x_mask) - else: - logw_ = torch.log(w + 1e-6) * x_mask - logw = self.dp(x, x_mask, g=g) - l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging - - # expand prior - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) - - z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) - o = self.dec(z_slice, g=g) - return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - if self.use_sdp: - logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) - else: - logw = self.dp(x, x_mask, g=g) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:,:,:max_len], g=g) - return o, attn, y_mask, (z, z_p, m_p, logs_p) - - def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): - assert self.n_speakers > 0, "n_speakers have to be larger than 0." 
- g_src = self.emb_g(sid_src).unsqueeze(-1) - g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) - z_p = self.flow(z, y_mask, g=g_src) - z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) - o_hat = self.dec(z_hat * y_mask, g=g_tgt) - return o_hat, y_mask, (z, z_p, z_hat) - - @dataclass class VitsConfig: parser: Dict[Any, Any] = MISSING @@ -643,12 +123,10 @@ def configure_optimizers(self): pass def setup_training_data(self, cfg): - # TODO: Call data preprocessing - pass + self._train_dl = self._loader(cfg) def setup_validation_data(self, cfg): - # TODO: Call data preprocessing - pass + self._validation_dl = self._loader(cfg) def setup_test_data(self, cfg): """Omitted.""" diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index c92bbe03c687..18bec37a8f90 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -8,10 +8,46 @@ from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d from torch.nn.utils import weight_norm, remove_weight_norm +from dataclasses import dataclass +from itertools import chain +from typing import Any, Dict + +from hydra.utils import instantiate +from omegaconf import MISSING, DictConfig, OmegaConf +from pytorch_lightning import Trainer +from pytorch_lightning.loggers import LoggerCollection, TensorBoardLogger +import math + +import commons +import modules +import attentions +import monotonic_align + +from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d +from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm +from commons import init_weights, get_padding import commons from commons import init_weights, get_padding from transforms import piecewise_rational_quadratic_transform +from nemo.collections.asr.data.audio_to_text import FastPitchDataset +from nemo.collections.common.parts.preprocessing import parsers +from nemo.collections.tts.helpers.helpers import plot_spectrogram_to_numpy, regulate_len +from nemo.collections.tts.models.base import TextToWaveform +from nemo.core.classes.common import PretrainedModelInfo, typecheck +from nemo.core.neural_types.elements import ( + MelSpectrogramType, + RegressionValuesType, + TokenDurationType, + TokenIndex, + TokenLogDurationType, +) +from nemo.core.neural_types.neural_type import NeuralType +from nemo.core.optim.lr_scheduler import NoamAnnealing +from nemo.utils import logging +from nemo.collections.tts.models.base import TextToWaveform +from nemo.collections.tts.losses.vits_losses import DiscriminatorLoss, FeatureLoss, GeneratorLoss, KlLoss +#from nemo.collections.tts.modules.vits_modules import MultiPeriodDiscriminator, MultiScaleDiscriminator LRELU_SLOPE = 0.1 @@ -389,3 +425,522 @@ def forward(self, x, x_mask, g=None, reverse=False): return x, logdet else: return x + + +class StochasticDurationPredictor(nn.Module): + def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): + super().__init__() + filter_channels = in_channels # it needs to be removed from future version. 
+ self.in_channels = in_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.n_flows = n_flows + self.gin_channels = gin_channels + + self.log_flow = modules.Log() + self.flows = nn.ModuleList() + self.flows.append(modules.ElementwiseAffine(2)) + for i in range(n_flows): + self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) + self.flows.append(modules.Flip()) + + self.post_pre = nn.Conv1d(1, filter_channels, 1) + self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) + self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) + self.post_flows = nn.ModuleList() + self.post_flows.append(modules.ElementwiseAffine(2)) + for i in range(4): + self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) + self.post_flows.append(modules.Flip()) + + self.pre = nn.Conv1d(in_channels, filter_channels, 1) + self.proj = nn.Conv1d(filter_channels, filter_channels, 1) + self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, filter_channels, 1) + + def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): + x = torch.detach(x) + x = self.pre(x) + if g is not None: + g = torch.detach(g) + x = x + self.cond(g) + x = self.convs(x, x_mask) + x = self.proj(x) * x_mask + + if not reverse: + flows = self.flows + assert w is not None + + logdet_tot_q = 0 + h_w = self.post_pre(w) + h_w = self.post_convs(h_w, x_mask) + h_w = self.post_proj(h_w) * x_mask + e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask + z_q = e_q + for flow in self.post_flows: + z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) + logdet_tot_q += logdet_q + z_u, z1 = torch.split(z_q, [1, 1], 1) + u = torch.sigmoid(z_u) * x_mask + z0 = (w - u) * x_mask + logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2]) + logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q + + logdet_tot = 0 + z0, logdet = self.log_flow(z0, x_mask) + logdet_tot += logdet + z = torch.cat([z0, z1], 1) + for flow in flows: + z, logdet = flow(z, x_mask, g=x, reverse=reverse) + logdet_tot = logdet_tot + logdet + nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot + return nll + logq # [b] + else: + flows = list(reversed(self.flows)) + flows = flows[:-2] + [flows[-1]] # remove a useless vflow + z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale + for flow in flows: + z = flow(z, x_mask, g=x, reverse=reverse) + z0, z1 = torch.split(z, [1, 1], 1) + logw = z0 + return logw + + +class DurationPredictor(nn.Module): + def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): + super().__init__() + + self.in_channels = in_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.gin_channels = gin_channels + + self.drop = nn.Dropout(p_dropout) + self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) + self.norm_1 = modules.LayerNorm(filter_channels) + self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) + self.norm_2 = modules.LayerNorm(filter_channels) + self.proj = nn.Conv1d(filter_channels, 1, 1) + + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, 
in_channels, 1) + + def forward(self, x, x_mask, g=None): + x = torch.detach(x) + if g is not None: + g = torch.detach(g) + x = x + self.cond(g) + x = self.conv_1(x * x_mask) + x = torch.relu(x) + x = self.norm_1(x) + x = self.drop(x) + x = self.conv_2(x * x_mask) + x = torch.relu(x) + x = self.norm_2(x) + x = self.drop(x) + x = self.proj(x * x_mask) + return x * x_mask + + +class TextEncoder(nn.Module): + def __init__(self, + n_vocab, + out_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout): + super().__init__() + self.n_vocab = n_vocab + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + + self.emb = nn.Embedding(n_vocab, hidden_channels) + nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) + + self.encoder = attentions.Encoder( + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout) + self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, x, x_lengths): + x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] + x = torch.transpose(x, 1, -1) # [b, h, t] + x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) + + x = self.encoder(x * x_mask, x_mask) + stats = self.proj(x) * x_mask + + m, logs = torch.split(stats, self.out_channels, dim=1) + return x, m, logs, x_mask + + +class ResidualCouplingBlock(nn.Module): + def __init__(self, + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + n_flows=4, + gin_channels=0): + super().__init__() + self.channels = channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.n_flows = n_flows + self.gin_channels = gin_channels + + self.flows = nn.ModuleList() + for i in range(n_flows): + self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) + self.flows.append(modules.Flip()) + + def forward(self, x, x_mask, g=None, reverse=False): + if not reverse: + for flow in self.flows: + x, _ = flow(x, x_mask, g=g, reverse=reverse) + else: + for flow in reversed(self.flows): + x = flow(x, x_mask, g=g, reverse=reverse) + return x + + +class PosteriorEncoder(nn.Module): + def __init__(self, + in_channels, + out_channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=0): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.gin_channels = gin_channels + + self.pre = nn.Conv1d(in_channels, hidden_channels, 1) + self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, x, x_lengths, g=None): + x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) + x = self.pre(x) * x_mask + x = self.enc(x, x_mask, g=g) + stats = self.proj(x) * x_mask + m, logs = torch.split(stats, self.out_channels, dim=1) + z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask + return z, m, logs, x_mask + + +class Generator(torch.nn.Module): + def __init__(self, initial_channel, 
resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): + super(Generator, self).__init__() + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) + resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 + + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + self.ups.append(weight_norm( + ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), + k, u, padding=(k-u)//2))) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = upsample_initial_channel//(2**(i+1)) + for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): + self.resblocks.append(resblock(ch, k, d)) + + self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) + self.ups.apply(init_weights) + + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) + + def forward(self, x, g=None): + x = self.conv_pre(x) + if g is not None: + x = x + self.cond(g) + + for i in range(self.num_upsamples): + x = F.leaky_relu(x, modules.LRELU_SLOPE) + x = self.ups[i](x) + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i*self.num_kernels+j](x) + else: + xs += self.resblocks[i*self.num_kernels+j](x) + x = xs / self.num_kernels + x = F.leaky_relu(x) + x = self.conv_post(x) + x = torch.tanh(x) + + return x + + def remove_weight_norm(self): + print('Removing weight norm...') + for l in self.ups: + remove_weight_norm(l) + for l in self.resblocks: + l.remove_weight_norm() + + +class DiscriminatorP(torch.nn.Module): + def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): + super(DiscriminatorP, self).__init__() + self.period = period + self.use_spectral_norm = use_spectral_norm + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList([ + norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), + ]) + self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) + + def forward(self, x): + fmap = [] + + # 1d to 2d + b, c, t = x.shape + if t % self.period != 0: # pad first + n_pad = self.period - (t % self.period) + x = F.pad(x, (0, n_pad), "reflect") + t = t + n_pad + x = x.view(b, c, t // self.period, self.period) + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, modules.LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class DiscriminatorS(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(DiscriminatorS, self).__init__() + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList([ + norm_f(Conv1d(1, 16, 15, 1, padding=7)), + norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), + norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), + norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), + 
norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), + norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), + ]) + self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) + + def forward(self, x): + fmap = [] + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, modules.LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class MultiPeriodDiscriminator(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(MultiPeriodDiscriminator, self).__init__() + periods = [2,3,5,7,11] + + discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] + discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] + self.discriminators = nn.ModuleList(discs) + + def forward(self, y, y_hat): + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + y_d_rs.append(y_d_r) + y_d_gs.append(y_d_g) + fmap_rs.append(fmap_r) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + + +class SynthesizerTrn(nn.Module): + """ + Synthesizer for Training + """ + + def __init__(self, + n_vocab, + spec_channels, + segment_size, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + n_speakers=0, + gin_channels=0, + use_sdp=True, + **kwargs): + + super().__init__() + self.n_vocab = n_vocab + self.spec_channels = spec_channels + self.inter_channels = inter_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.resblock = resblock + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.upsample_rates = upsample_rates + self.upsample_initial_channel = upsample_initial_channel + self.upsample_kernel_sizes = upsample_kernel_sizes + self.segment_size = segment_size + self.n_speakers = n_speakers + self.gin_channels = gin_channels + + self.use_sdp = use_sdp + + self.enc_p = TextEncoder(n_vocab, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout) + self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) + self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) + self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) + + if use_sdp: + self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) + else: + self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) + + if n_speakers > 1: + self.emb_g = nn.Embedding(n_speakers, gin_channels) + + def forward(self, x, x_lengths, y, y_lengths, sid=None): + + x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) + if self.n_speakers > 0: + g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] + else: + g = None + + z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) + z_p = self.flow(z, y_mask, g=g) + + with torch.no_grad(): + # negative cross-entropy + s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] + neg_cent1 = 
torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] + neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] + neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] + neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] + neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 + + attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) + attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() + + w = attn.sum(2) + if self.use_sdp: + l_length = self.dp(x, x_mask, w, g=g) + l_length = l_length / torch.sum(x_mask) + else: + logw_ = torch.log(w + 1e-6) * x_mask + logw = self.dp(x, x_mask, g=g) + l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging + + # expand prior + m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) + logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) + + z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) + o = self.dec(z_slice, g=g) + return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) + + def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): + x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) + if self.n_speakers > 0: + g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] + else: + g = None + + if self.use_sdp: + logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) + else: + logw = self.dp(x, x_mask, g=g) + w = torch.exp(logw) * x_mask * length_scale + w_ceil = torch.ceil(w) + y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() + y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) + attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) + attn = commons.generate_path(w_ceil, attn_mask) + + m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] + logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] + + z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale + z = self.flow(z_p, y_mask, g=g, reverse=True) + o = self.dec((z * y_mask)[:,:,:max_len], g=g) + return o, attn, y_mask, (z, z_p, m_p, logs_p) + + def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): + assert self.n_speakers > 0, "n_speakers have to be larger than 0." 
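+        # Voice conversion path: encode the source audio to z under the
+        # source-speaker condition, push z through the flow into the shared
+        # prior space, then invert the flow under the target embedding and
+        # decode. Both speaker IDs must come from the training speaker set.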
+ g_src = self.emb_g(sid_src).unsqueeze(-1) + g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) + z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) + z_p = self.flow(z, y_mask, g=g_src) + z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) + o_hat = self.dec(z_hat * y_mask, g=g_tgt) + return o_hat, y_mask, (z, z_p, z_hat) From 49d5341b681a1f3ae1f12799a101b228f14d2766 Mon Sep 17 00:00:00 2001 From: martynwei Date: Tue, 2 Nov 2021 23:25:12 -0400 Subject: [PATCH 003/244] Add vits.yaml --- examples/tts/conf/vits.yaml | 138 ++++++++++++++++++++++++++++++++++++ 1 file changed, 138 insertions(+) create mode 100644 examples/tts/conf/vits.yaml diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml new file mode 100644 index 000000000000..179194ce3592 --- /dev/null +++ b/examples/tts/conf/vits.yaml @@ -0,0 +1,138 @@ +name: "VITS" + +labels: [' ', '!', "'", '(', ')', ',', '-', '.', ':', ';', '?', 'a', 'b', 'c', 'd', 'e', 'f', 'g', + 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'] +train_dataset: ??? +validation_datasets: ??? +test_datasets: null + +model: + sample_rate: 22050 + splice_length: 64 + lr: 3e-2 + labels: ${labels} + n_speakers: 1 + symbols_embedding_dim: 384 + max_token_duration: 75 + n_mel_channels: 80 + pitch_embedding_kernel_size: 3 + mel_loss_coeff: 40 + hop_size: 256 + + train_ds: + manifest_filepath: ${train_dataset} + max_duration: null + min_duration: 0.1 + sample_rate: ${model.sample_rate} + trim: false + parser: null + drop_last: true + shuffle: true + batch_size: 64 + num_workers: 12 + + validation_ds: + manifest_filepath: ${validation_datasets} + sample_rate: ${model.sample_rate} + trim: false + parser: null + drop_last: false + shuffle: false + batch_size: 64 + num_workers: 8 + + preprocessor: + _target_: nemo.collections.asr.parts.preprocessing.features.FilterbankFeatures + dither: 0.0 + nfilt: ${model.n_mel_channels} + frame_splicing: 1 + highfreq: 8000 + log: true + log_zero_guard_type: clamp + log_zero_guard_value: 1e-05 + lowfreq: 0 + mag_power: 1.0 + n_fft: 1024 + n_window_size: 1024 + n_window_stride: ${model.hop_size} + normalize: null + pad_to: 1 + pad_value: 0 + preemph: null + sample_rate: ${model.sample_rate} + window: hann + exact_pad: true + use_grads: false + + input_fft: + _target_: nemo.collections.tts.modules.transformer.FFTransformerEncoder + n_layer: 6 + n_head: 1 + d_model: ${model.symbols_embedding_dim} + d_head: 64 + d_inner: 1536 + kernel_size: 3 + dropout: 0.1 + dropatt: 0.1 + dropemb: 0.0 + n_embed: 148 # NOTE Should match # of tokens in `symbol_set` + d_embed: ${model.symbols_embedding_dim} + padding_idx: 0 + + output_fft: + _target_: nemo.collections.tts.modules.transformer.FFTransformerDecoder + n_layer: 6 + n_head: 1 + d_model: ${model.symbols_embedding_dim} + d_head: 64 + d_inner: 1536 + kernel_size: 3 + dropout: 0.1 + dropatt: 0.1 + dropemb: 0.0 + + duration_predictor: + _target_: nemo.collections.tts.modules.fastpitch.TemporalPredictor + input_size: ${model.symbols_embedding_dim} + kernel_size: 3 + filter_size: 256 + dropout: 0.1 + n_layers: 2 + + pitch_predictor: + _target_: nemo.collections.tts.modules.fastpitch.TemporalPredictor + input_size: ${model.symbols_embedding_dim} + kernel_size: 3 + filter_size: 256 + dropout: 0.1 + n_layers: 2 + + generator: + _target_: nemo.collections.tts.modules.vits_modules.Generator + upsample_kernel_sizes: [16,16,4,4] + upsample_rates: [8,8,2,2] + upsample_initial_channel: 512 + resblock_kernel_sizes: [3,7,11] + resblock_dilation_sizes: 
[[1,3,5], [1,3,5], [1,3,5]] + resblock: 1 + initial_input_size: 384 + +trainer: + gpus: -1 # number of gpus + max_epochs: 1500 + num_nodes: 1 + accelerator: ddp + accumulate_grad_batches: 1 + checkpoint_callback: False # Provided by exp_manager + logger: False # Provided by exp_manager + gradient_clip_val: 1000.0 + flush_logs_every_n_steps: 1000 + log_every_n_steps: 100 + check_val_every_n_epoch: 5 + precision: 16 + +exp_manager: + exp_dir: null + name: ${name} + create_tensorboard_logger: True + create_checkpoint_callback: True From 8810a82eec25d385388e3f797aefcad6149065c9 Mon Sep 17 00:00:00 2001 From: Ryan Hong <66425733+rhong99@users.noreply.github.com> Date: Wed, 10 Nov 2021 11:10:55 -0500 Subject: [PATCH 004/244] Add _loader to vits.py --- nemo/collections/tts/models/vits.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 9805bbebd3bf..3dd91bec15ec 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -122,6 +122,19 @@ def configure_optimizers(self): # TODO: Implement pass + @staticmethod + def _loader(cfg): + try: + _ = cfg.dataset.manifest_filepath + except omegaconf.errors.MissingMandatoryValue: + logging.warning("manifest_filepath was skipped. No dataset for this model.") + return None + + dataset = instantiate(cfg.dataset) + return torch.utils.data.DataLoader( # noqa + dataset=dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params, + ) + def setup_training_data(self, cfg): self._train_dl = self._loader(cfg) From f87b765fbf93c2bbced7b10c8eae651078ada4b2 Mon Sep 17 00:00:00 2001 From: jasonjjl1999 Date: Wed, 10 Nov 2021 11:47:57 -0500 Subject: [PATCH 005/244] Add basic template for vits --- nemo/collections/tts/models/vits.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 3dd91bec15ec..bfb45a3464fe 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -53,7 +53,7 @@ class VitsConfig: class Vits(TextToWaveform): - def __init__(self, cfg: DictConfig, trainer: Trainer = None): + def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): if isinstance(cfg, dict): cfg = OmegaConf.create(cfg) From f2189b6fda60f63a08305bed2799013fc16553e5 Mon Sep 17 00:00:00 2001 From: martynwei Date: Wed, 10 Nov 2021 11:58:04 -0500 Subject: [PATCH 006/244] Update vits.yaml with vits parameters --- examples/tts/conf/vits.yaml | 40 ++++++++++++++++++++++++++++--------- 1 file changed, 31 insertions(+), 9 deletions(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index 179194ce3592..21770ce18f3b 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -9,7 +9,7 @@ test_datasets: null model: sample_rate: 22050 splice_length: 64 - lr: 3e-2 + lr: 2e-4 labels: ${labels} n_speakers: 1 symbols_embedding_dim: 384 @@ -18,6 +18,24 @@ model: pitch_embedding_kernel_size: 3 mel_loss_coeff: 40 hop_size: 256 + log_interval: 200 + eval_interval: 1000 + seed: 1234 + betas: [0.8,0.99] + eps: 1e-9 + lr_decay: 0.999875 + segment_size: 8192 + init_lr_ratio: 1 + warmup_epochs: 0 + c_mel: 45 + c_kl: 1. 
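+  # These two coefficients weight terms of the generator objective in
+  # training_step (see models/vits.py):
+  #   loss_gen_all = loss_gen + loss_fm + c_mel * L1(mel, mel_hat)
+  #                  + loss_dur + c_kl * KL(q(z|x) || p(z|c_text))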
+ inter_channels: 192 + hidden_channels: 192 + filter_channels: 768 + n_heads: 2 + p_dropout: 0.1 + n_layers_q: 3 + use_spectral_norm: false train_ds: manifest_filepath: ${train_dataset} @@ -30,6 +48,8 @@ model: shuffle: true batch_size: 64 num_workers: 12 + max_wav_value: 32768.0 + filter_length: 1024 validation_ds: manifest_filepath: ${validation_datasets} @@ -40,6 +60,8 @@ model: shuffle: false batch_size: 64 num_workers: 8 + max_wav_value: 32768.0 + filter_length: 1024 preprocessor: _target_: nemo.collections.asr.parts.preprocessing.features.FilterbankFeatures @@ -92,12 +114,12 @@ model: dropemb: 0.0 duration_predictor: - _target_: nemo.collections.tts.modules.fastpitch.TemporalPredictor + _target_: nemo.collections.tts.modules.vits_modules.StochasticDurationPredictor input_size: ${model.symbols_embedding_dim} kernel_size: 3 filter_size: 256 dropout: 0.1 - n_layers: 2 + n_layers: 6 pitch_predictor: _target_: nemo.collections.tts.modules.fastpitch.TemporalPredictor @@ -105,21 +127,21 @@ model: kernel_size: 3 filter_size: 256 dropout: 0.1 - n_layers: 2 + n_layers: 6 generator: _target_: nemo.collections.tts.modules.vits_modules.Generator - upsample_kernel_sizes: [16,16,4,4] - upsample_rates: [8,8,2,2] - upsample_initial_channel: 512 + resblock: 1 resblock_kernel_sizes: [3,7,11] resblock_dilation_sizes: [[1,3,5], [1,3,5], [1,3,5]] - resblock: 1 + upsample_rates: [8,8,2,2] + upsample_initial_channel: 512 + upsample_kernel_sizes: [16,16,4,4] initial_input_size: 384 trainer: gpus: -1 # number of gpus - max_epochs: 1500 + max_epochs: 20000 num_nodes: 1 accelerator: ddp accumulate_grad_batches: 1 From 3a6623e9fb67a0b7629d277bdf3bbdffcd5c7376 Mon Sep 17 00:00:00 2001 From: martynwei Date: Wed, 10 Nov 2021 12:00:32 -0500 Subject: [PATCH 007/244] Remove extra space --- nemo/collections/tts/modules/vits_modules.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index 18bec37a8f90..180ad0b2e2bd 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -799,7 +799,6 @@ def forward(self, y, y_hat): return y_d_rs, y_d_gs, fmap_rs, fmap_gs - class SynthesizerTrn(nn.Module): """ Synthesizer for Training From 3968edf087fec1cd7c39343e2ea4c987923b9a2a Mon Sep 17 00:00:00 2001 From: jasonjjl1999 Date: Wed, 10 Nov 2021 12:18:08 -0500 Subject: [PATCH 008/244] Add top level training script --- examples/tts/vits.py | 62 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 examples/tts/vits.py diff --git a/examples/tts/vits.py b/examples/tts/vits.py new file mode 100644 index 000000000000..9000f4d696c8 --- /dev/null +++ b/examples/tts/vits.py @@ -0,0 +1,62 @@ +# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
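+
+# Typical launch, overriding the MISSING dataset keys from conf/vits.yaml
+# (paths below are placeholders):
+#   python examples/tts/vits.py \
+#     train_dataset=/path/to/train_manifest.json \
+#     validation_datasets=/path/to/val_manifest.json \
+#     trainer.gpus=1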
+ +import pytorch_lightning as pl +from pytorch_lightning.plugins import DDPPlugin + +from nemo.collections.common.callbacks import LogEpochTimeCallback +from nemo.collections.tts.models.vits import Vits +from nemo.core.config import hydra_runner +from nemo.utils.exp_manager import exp_manager + + +@hydra_runner(config_path="conf", config_name="vits") +def main(cfg): + trainer = pl.Trainer(**cfg.trainer) + exp_manager(trainer, cfg.get("exp_manager", None)) + model = Vits(cfg=cfg.model, trainer=trainer) + lr_logger = pl.callbacks.LearningRateMonitor() + epoch_time_logger = LogEpochTimeCallback() + trainer.callbacks.extend([lr_logger, epoch_time_logger]) + trainer.fit(model) + + """ + load_checkpoint = True + if load_checkpoint: + print('Loading from checkpoint') + model, _, _, _ = utils.load_checkpoint("vits_lightning.ckpt", model) + + hps = utils.get_hparams() + collate_fn = TextAudioCollate() + train_dataset = TextAudioLoader(hps.data.training_files, hps.data) + eval_dataset = TextAudioLoader(hps.data.validation_files, hps.data) + train_sampler = DistributedBucketSampler( + train_dataset, + hps.train.batch_size, + [32, 300, 400, 500, 600, 700, 800, 900, 1000], + shuffle=True) + + train_loader = DataLoader(train_dataset, num_workers=8, shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler) + eval_loader = DataLoader(eval_dataset, num_workers=8, shuffle=False, batch_size=hps.train.batch_size, pin_memory=True, drop_last=False, collate_fn=collate_fn) + + trainer = Trainer(gpus=1, max_epochs=1) + ljspeech = VITSDataModule() + + trainer.fit(model, train_loader, eval_loader) + trainer.save_checkpoint("vits_lightning.ckpt") + """ + + +if __name__ == '__main__': + main() # noqa pylint: disable=no-value-for-parameter From b8b5fabf1deb33ec3e4466c77286675969555ecf Mon Sep 17 00:00:00 2001 From: jasonjjl1999 Date: Wed, 10 Nov 2021 12:18:50 -0500 Subject: [PATCH 009/244] Add some variables to vits yaml --- examples/tts/conf/vits.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index 21770ce18f3b..10cb3389a3ba 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -36,6 +36,8 @@ model: p_dropout: 0.1 n_layers_q: 3 use_spectral_norm: false + mel_fmin: 0.0 + mel_fmax: null train_ds: manifest_filepath: ${train_dataset} From d9db52454ca975cf1d55f1ea549e832e2024f9ec Mon Sep 17 00:00:00 2001 From: jasonjjl1999 Date: Wed, 10 Nov 2021 12:19:34 -0500 Subject: [PATCH 010/244] Add forward and training methods --- nemo/collections/tts/models/vits.py | 99 ++++++++++++++++++++++++++++- 1 file changed, 97 insertions(+), 2 deletions(-) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index bfb45a3464fe..43243e62d914 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -119,8 +119,103 @@ def parse(self, str_input: str) -> torch.tensor: pass def configure_optimizers(self): - # TODO: Implement - pass + self.optim_g = torch.optim.AdamW( + self.net_g.parameters(), + self._cfg.model.lr, + betas=self._cfg.model.betas, + eps=self._cfg.model.eps) + self.optim_d = torch.optim.AdamW( + self.net_d.parameters(), + self._cfg.model.lr, + betas=self._cfg.model.betas, + eps=self._cfg.model.eps) + + scheduler_g = torch.optim.lr_scheduler.ExponentialLR(self.optim_g, gamma=self._cfg.model.lr_decay) + scheduler_g_dict = { + 'scheduler': scheduler_g, + 'interval': 'step', + } + scheduler_d = torch.optim.lr_scheduler.ExponentialLR(self.optim_d, 
gamma=self._cfg.model.lr_decay) + scheduler_d_dict = { + 'scheduler': scheduler_d, + 'interval': 'step' + } + return [self.optim_g, self.optim_d], [scheduler_g_dict, scheduler_d_dict] + + def forward(self, batch, batch_idx): + self.net_g.eval() + with torch.no_grad(): + (x, x_lengths, spec, spec_lengths, y, y_lengths) = batch + + # remove else + x = x[:1] + x_lengths = x_lengths[:1] + + y_hat, attn, mask, *_ = self.net_g.module.infer(x, x_lengths, max_len=1000) + y_hat_lengths = mask.sum([1, 2]).long() * self._cfg.model.hop_size + + self.net_g.train() + return y_hat[0, :, :y_hat_lengths[0]] + + def training_step(self, batch, batch_idx): + (x, x_lengths, spec, spec_lengths, y, y_lengths) = batch + + with autocast(enabled=False): + y_hat, l_length, attn, ids_slice, x_mask, z_mask, \ + (z, z_p, m_p, logs_p, m_q, logs_q) = self.net_g(x, x_lengths, spec, spec_lengths) + mel = spec_to_mel_torch( + spec, + self._cfg.model.train_ds.filter_length, + self._cfg.model.n_mel_channels, + self._cfg.model.sample_rate, + self._cfg.model.mel_fmin, + self._cfg.model.mel_fmax + ) + y_mel = commons.slice_segments(mel, ids_slice, self._cfg.model.segment_size // self._cfg.model.hop_size) + y_hat_mel = mel_spectrogram_torch( + y_hat.squeeze(1), + self._cfg.model.train_ds.filter_length, + self._cfg.model.n_mel_channels, + self._cfg.model.sample_rate, + self._cfg.model.hop_size, + self._cfg.model.preprocessing.n_window_size, + self._cfg.model.mel_fmin, + self._cfg.model.mel_fmax + ) + y = commons.slice_segments(y, ids_slice * self._cfg.model.hop_size, self._cfg.model.segment_size) # slice + y_d_hat_r, y_d_hat_g, _, _ = self.net_d(y, y_hat.detach()) + loss_disc, losses_disc_r, losses_disc_g = DiscriminatorLoss(y_d_hat_r, y_d_hat_g) + loss_disc_all = loss_disc + + # train discriminator + self.optim_d.zero_grad() + self.manual_backward(loss_disc_all) + commons.clip_grad_value_(self.net_d.parameters(), None) + self.optim_d.step() + + with autocast(enabled=True): + # Generator + y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = self.net_d(y, y_hat) + with autocast(enabled=False): + loss_dur = torch.sum(l_length.float()) + loss_mel = F.l1_loss(y_mel, y_hat_mel) * self._cfg.model.c_mel + loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * self._cfg.model.c_kl + + loss_fm = FeatureLoss(fmap_r, fmap_g) + loss_gen, losses_gen = GeneratorLoss(y_d_hat_g) + loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl + + # train generator + self.optim_g.zero_grad() + self.manual_backward(loss_gen_all) + commons.clip_grad_value_(self.net_g.parameters(), None) + self.optim_d.step() + + schedulers = self.lr_schedulers() + if schedulers is not None: + sch1, sch2 = schedulers + sch1.step() + sch2.step() @staticmethod def _loader(cfg): From 1b4db39a0256dd9c5a77ac2c1e0c091a1a931c32 Mon Sep 17 00:00:00 2001 From: martynwei Date: Thu, 11 Nov 2021 13:26:43 -0500 Subject: [PATCH 011/244] Fix imports --- nemo/collections/tts/models/vits.py | 11 ++--- nemo/collections/tts/modules/vits_modules.py | 42 ++++++++++++++++++++ 2 files changed, 48 insertions(+), 5 deletions(-) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 43243e62d914..1a0e75fa3683 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -5,6 +5,7 @@ import numpy as np import torch from hydra.utils import instantiate +import omegaconf from omegaconf import MISSING, DictConfig, OmegaConf from pytorch_lightning import Trainer from pytorch_lightning.loggers import LoggerCollection, TensorBoardLogger @@ 
-14,9 +15,9 @@ from torch.nn import functional as F import commons -import modules import attentions import monotonic_align +from torch.cuda.amp import autocast from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm @@ -86,7 +87,7 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): self.pitch_predictor = instantiate(cfg.pitch_predictor) self.generator = instantiate(cfg.generator) - self.multiperioddisc = MultiPeriodDiscriminator() + self.multiperioddisc = modules.MultiPeriodDiscriminator() self.feat_matching_loss = FeatureLoss() self.disc_loss = DiscriminatorLoss() self.gen_loss = GeneratorLoss() @@ -163,7 +164,7 @@ def training_step(self, batch, batch_idx): with autocast(enabled=False): y_hat, l_length, attn, ids_slice, x_mask, z_mask, \ (z, z_p, m_p, logs_p, m_q, logs_q) = self.net_g(x, x_lengths, spec, spec_lengths) - mel = spec_to_mel_torch( + mel = modules.spec_to_mel_torch( spec, self._cfg.model.train_ds.filter_length, self._cfg.model.n_mel_channels, @@ -172,7 +173,7 @@ def training_step(self, batch, batch_idx): self._cfg.model.mel_fmax ) y_mel = commons.slice_segments(mel, ids_slice, self._cfg.model.segment_size // self._cfg.model.hop_size) - y_hat_mel = mel_spectrogram_torch( + y_hat_mel = modules.mel_spectrogram_torch( y_hat.squeeze(1), self._cfg.model.train_ds.filter_length, self._cfg.model.n_mel_channels, @@ -199,7 +200,7 @@ def training_step(self, batch, batch_idx): with autocast(enabled=False): loss_dur = torch.sum(l_length.float()) loss_mel = F.l1_loss(y_mel, y_hat_mel) * self._cfg.model.c_mel - loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * self._cfg.model.c_kl + loss_kl = KlLoss(z_p, logs_q, m_p, logs_p, z_mask) * self._cfg.model.c_kl loss_fm = FeatureLoss(fmap_r, fmap_g) loss_gen, losses_gen = GeneratorLoss(y_d_hat_g) diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index 180ad0b2e2bd..dba9bdda87aa 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -943,3 +943,45 @@ def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) o_hat = self.dec(z_hat * y_mask, g=g_tgt) return o_hat, y_mask, (z, z_p, z_hat) + + +def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): + global mel_basis + dtype_device = str(spec.dtype) + '_' + str(spec.device) + fmax_dtype_device = str(fmax) + '_' + dtype_device + if fmax_dtype_device not in mel_basis: + mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) + mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) + spec = torch.matmul(mel_basis[fmax_dtype_device], spec) + spec = spectral_normalize_torch(spec) + return spec + + +def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): + if torch.min(y) < -1.: + print('min value is ', torch.min(y)) + if torch.max(y) > 1.: + print('max value is ', torch.max(y)) + + global mel_basis, hann_window + dtype_device = str(y.dtype) + '_' + str(y.device) + fmax_dtype_device = str(fmax) + '_' + dtype_device + wnsize_dtype_device = str(win_size) + '_' + dtype_device + if fmax_dtype_device not in mel_basis: + mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) + mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) + if wnsize_dtype_device not in hann_window: + 
hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) + + y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') + y = y.squeeze(1) + + spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], + center=center, pad_mode='reflect', normalized=False, onesided=True) + + spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) + + spec = torch.matmul(mel_basis[fmax_dtype_device], spec) + spec = spectral_normalize_torch(spec) + + return spec From 316a04bb162f963e9890cfddf16c08a460046601 Mon Sep 17 00:00:00 2001 From: "richa.ren@mail.utoronto.ca" Date: Thu, 11 Nov 2021 13:43:40 -0500 Subject: [PATCH 012/244] Added validation step --- nemo/collections/tts/models/vits.py | 58 +++++++++++++++++++++++++++-- 1 file changed, 54 insertions(+), 4 deletions(-) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 1a0e75fa3683..634927e7a48e 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -8,7 +8,7 @@ import omegaconf from omegaconf import MISSING, DictConfig, OmegaConf from pytorch_lightning import Trainer -from pytorch_lightning.loggers import LoggerCollection, TensorBoardLogger +from pytorch_lightning.loggers import LoggerCollection, TensorBoardLogger, WandbLogger import math import torch from torch import nn @@ -43,6 +43,12 @@ import nemo.collections.tts.modules.vits_modules as modules +HAVE_WANDB = True +try: + import wandb +except ModuleNotFoundError: + HAVE_WANDB = False + @dataclass class VitsConfig: parser: Dict[Any, Any] = MISSING @@ -79,7 +85,7 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): # Ensure passed cfg is compliant with schema OmegaConf.merge(cfg, schema) - self.preprocessor = instantiate(cfg.preprocessor) + self.audio_to_melspec_precessor = instantiate(cfg.preprocessor) self.melspec_fn = instantiate(cfg.preprocessor, highfreq=None, use_grads=True) self.encoder = instantiate(cfg.input_fft) @@ -144,7 +150,6 @@ def configure_optimizers(self): return [self.optim_g, self.optim_d], [scheduler_g_dict, scheduler_d_dict] def forward(self, batch, batch_idx): - self.net_g.eval() with torch.no_grad(): (x, x_lengths, spec, spec_lengths, y, y_lengths) = batch @@ -155,7 +160,6 @@ def forward(self, batch, batch_idx): y_hat, attn, mask, *_ = self.net_g.module.infer(x, x_lengths, max_len=1000) y_hat_lengths = mask.sum([1, 2]).long() * self._cfg.model.hop_size - self.net_g.train() return y_hat[0, :, :y_hat_lengths[0]] def training_step(self, batch, batch_idx): @@ -218,6 +222,52 @@ def training_step(self, batch, batch_idx): sch1.step() sch2.step() + def validation_step(self, batch, batch_idx): + (x, x_lengths, spec, spec_lengths, y, y_lengths) = batch + + y_hat, attn, mask, *_ = self.net_g.module.infer(x, x_lengths, max_len=1000) + y_hat_lengths = mask.sum([1, 2]).long() * self.hps.data.hop_length + + # Note to modify the functions / use the ones in NeMo, we need the lengths + mel, mel_lengths = self.audio_to_melspec_precessor(x, x_lengths) + y_hat_mel, y_hat_mel_lengths = self.audio_to_melspec_precessor(y, y_lengths) + + loss_mel = F.l1_loss(mel, y_hat_mel) + + self.log_dict({"val_loss": loss_mel}, on_epoch=True, sync_dist=True) + + # plot audio once per epoch + if batch_idx == 0 and isinstance(self.logger, WandbLogger) and HAVE_WANDB: + clips = [] + specs = [] + + for i in range(min(5, y.shape[0])): + clips += [ + wandb.Audio( + y[i, : 
y_lengths[i]].data.cpu().numpy(), + caption=f"real audio {i}", + sample_rate=self.hps.data.sampling_rate, + ), + wandb.Audio( + y_hat[i, : y_hat_lengths[i]].data.cpu().numpy().astype('float32'), + caption=f"generated audio {i}", + sample_rate=self.hps.data.sampling_rate, + ), + ] + + specs += [ + wandb.Image( + plot_spectrogram_to_numpy(y_hat_mel[i, :, : y_hat_mel_lengths[i]].data.cpu().numpy()), + caption=f"output mel {i}", + ), + wandb.Image( + plot_spectrogram_to_numpy(mel[i, :, : mel_lengths[i]].cpu().numpy()), + caption=f"gt mel {i}", + ), + ] + + self.logger.experiment.log({"audio": clips, "specs": specs}) + @staticmethod def _loader(cfg): try: From 27c76136a24aeb534c0a45d67636571d97cacfac Mon Sep 17 00:00:00 2001 From: "richa.ren@mail.utoronto.ca" Date: Thu, 11 Nov 2021 13:54:13 -0500 Subject: [PATCH 013/244] Log training losses --- nemo/collections/tts/models/vits.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 634927e7a48e..af3ef4fe4e7e 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -222,6 +222,20 @@ def training_step(self, batch, batch_idx): sch1.step() sch2.step() + metrics = { + "loss_gen": loss_gen, + "loss_fm": loss_fm, + "loss_mel * c_mel": loss_mel, + "loss_dur": loss_dur, + "loss_kl * c_kl": loss_kl, + "loss_gen_all": loss_gen_all, + "losses_disc_r": losses_disc_r, + "losses_disc_g": losses_disc_g, + "loss_disc_all": loss_disc_all, + } + self.log_dict(metrics, on_step=True, sync_dist=True) + self.log("scaled loss_mel", loss_mel, prog_bar=True, logger=False, sync_dist=True) + def validation_step(self, batch, batch_idx): (x, x_lengths, spec, spec_lengths, y, y_lengths) = batch From 41d99a956049464bcce4a9a09fb1bbc46bb91825 Mon Sep 17 00:00:00 2001 From: martynwei Date: Thu, 11 Nov 2021 13:58:09 -0500 Subject: [PATCH 014/244] Update loss calls to use class attributes --- nemo/collections/tts/models/vits.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index af3ef4fe4e7e..8c06958827c2 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -97,6 +97,7 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): self.feat_matching_loss = FeatureLoss() self.disc_loss = DiscriminatorLoss() self.gen_loss = GeneratorLoss() + self.kl_loss = KlLoss() self.max_token_duration = cfg.max_token_duration @@ -189,7 +190,7 @@ def training_step(self, batch, batch_idx): ) y = commons.slice_segments(y, ids_slice * self._cfg.model.hop_size, self._cfg.model.segment_size) # slice y_d_hat_r, y_d_hat_g, _, _ = self.net_d(y, y_hat.detach()) - loss_disc, losses_disc_r, losses_disc_g = DiscriminatorLoss(y_d_hat_r, y_d_hat_g) + loss_disc, losses_disc_r, losses_disc_g = self.disc_loss(y_d_hat_r, y_d_hat_g) loss_disc_all = loss_disc # train discriminator @@ -204,10 +205,10 @@ def training_step(self, batch, batch_idx): with autocast(enabled=False): loss_dur = torch.sum(l_length.float()) loss_mel = F.l1_loss(y_mel, y_hat_mel) * self._cfg.model.c_mel - loss_kl = KlLoss(z_p, logs_q, m_p, logs_p, z_mask) * self._cfg.model.c_kl + loss_kl = self.kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * self._cfg.model.c_kl - loss_fm = FeatureLoss(fmap_r, fmap_g) - loss_gen, losses_gen = GeneratorLoss(y_d_hat_g) + loss_fm = self.feat_matching_loss(fmap_r, fmap_g) + loss_gen, losses_gen = self.gen_loss(y_d_hat_g) loss_gen_all = loss_gen + 
loss_fm + loss_mel + loss_dur + loss_kl # train generator From d55d24ec4bfa89bfe3c92b38f5164d8a324e2409 Mon Sep 17 00:00:00 2001 From: jasonjjl1999 Date: Thu, 11 Nov 2021 14:25:43 -0500 Subject: [PATCH 015/244] Add VITS to models list --- nemo/collections/tts/models/__init__.py | 2 ++ nemo/collections/tts/models/vits.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/nemo/collections/tts/models/__init__.py b/nemo/collections/tts/models/__init__.py index 1d7e91220c8f..d95038b53a10 100644 --- a/nemo/collections/tts/models/__init__.py +++ b/nemo/collections/tts/models/__init__.py @@ -30,6 +30,7 @@ from nemo.collections.tts.models.uniglow import UniGlowModel from nemo.collections.tts.models.waveglow import WaveGlowModel from nemo.collections.tts.models.mixer_tts import MixerTTSModel + from nemo.collections.tts.models.vits import VitsModel except ModuleNotFoundError: pass @@ -55,4 +56,5 @@ "FastSpeech2HifiGanE2EModel", "AlignerModel", "MixerTTSModel", + "VitsModel", ] diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 8c06958827c2..8839eef6efa5 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -59,7 +59,7 @@ class VitsConfig: pitch_predictor: Dict[Any, Any] = MISSING -class Vits(TextToWaveform): +class VitsModel(TextToWaveform): def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): if isinstance(cfg, dict): cfg = OmegaConf.create(cfg) From 0686e9af373dad6def235ef26adf9d4d5d35957a Mon Sep 17 00:00:00 2001 From: jasonjjl1999 Date: Thu, 11 Nov 2021 15:26:11 -0500 Subject: [PATCH 016/244] Fix all imports --- nemo/collections/tts/models/vits.py | 22 +- .../tts/modules/monotonic_align/__init__.py | 19 + .../tts/modules/monotonic_align/core.c | 21299 ++++++++++++++++ .../tts/modules/monotonic_align/core.pyx | 42 + .../tts/modules/monotonic_align/setup.py | 9 + nemo/collections/tts/modules/vits_modules.py | 679 +- 6 files changed, 22044 insertions(+), 26 deletions(-) create mode 100644 nemo/collections/tts/modules/monotonic_align/__init__.py create mode 100644 nemo/collections/tts/modules/monotonic_align/core.c create mode 100644 nemo/collections/tts/modules/monotonic_align/core.pyx create mode 100644 nemo/collections/tts/modules/monotonic_align/setup.py diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 8839eef6efa5..645cec378464 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -9,19 +9,14 @@ from omegaconf import MISSING, DictConfig, OmegaConf from pytorch_lightning import Trainer from pytorch_lightning.loggers import LoggerCollection, TensorBoardLogger, WandbLogger -import math import torch from torch import nn from torch.nn import functional as F -import commons -import attentions -import monotonic_align from torch.cuda.amp import autocast from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding from nemo.collections.asr.data.audio_to_text import FastPitchDataset from nemo.collections.common.parts.preprocessing import parsers @@ -41,6 +36,7 @@ from nemo.collections.tts.models.base import TextToWaveform from nemo.collections.tts.losses.vits_losses import DiscriminatorLoss, FeatureLoss, GeneratorLoss, KlLoss import nemo.collections.tts.modules.vits_modules as modules +from nemo.collections.tts.modules.vits_modules import init_weights, get_padding, SynthesizerTrn, 
MultiPeriodDiscriminator HAVE_WANDB = True @@ -122,6 +118,14 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): self.sample_rate = cfg.sample_rate self.hop_size = cfg.hop_size + self.net_g = SynthesizerTrn( + # len(symbols), + self.hps.data.filter_length // 2 + 1, + self.hps.train.segment_size // self.hps.data.hop_length, + **self.hps.model) + self.net_d = MultiPeriodDiscriminator(self.hps.model.use_spectral_norm) + self.automatic_optimization = False + def parse(self, str_input: str) -> torch.tensor: # TODO: Implement pass @@ -177,7 +181,7 @@ def training_step(self, batch, batch_idx): self._cfg.model.mel_fmin, self._cfg.model.mel_fmax ) - y_mel = commons.slice_segments(mel, ids_slice, self._cfg.model.segment_size // self._cfg.model.hop_size) + y_mel = modules.slice_segments(mel, ids_slice, self._cfg.model.segment_size // self._cfg.model.hop_size) y_hat_mel = modules.mel_spectrogram_torch( y_hat.squeeze(1), self._cfg.model.train_ds.filter_length, @@ -188,7 +192,7 @@ def training_step(self, batch, batch_idx): self._cfg.model.mel_fmin, self._cfg.model.mel_fmax ) - y = commons.slice_segments(y, ids_slice * self._cfg.model.hop_size, self._cfg.model.segment_size) # slice + y = modules.slice_segments(y, ids_slice * self._cfg.model.hop_size, self._cfg.model.segment_size) # slice y_d_hat_r, y_d_hat_g, _, _ = self.net_d(y, y_hat.detach()) loss_disc, losses_disc_r, losses_disc_g = self.disc_loss(y_d_hat_r, y_d_hat_g) loss_disc_all = loss_disc @@ -196,7 +200,7 @@ def training_step(self, batch, batch_idx): # train discriminator self.optim_d.zero_grad() self.manual_backward(loss_disc_all) - commons.clip_grad_value_(self.net_d.parameters(), None) + modules.clip_grad_value_(self.net_d.parameters(), None) self.optim_d.step() with autocast(enabled=True): @@ -214,7 +218,7 @@ def training_step(self, batch, batch_idx): # train generator self.optim_g.zero_grad() self.manual_backward(loss_gen_all) - commons.clip_grad_value_(self.net_g.parameters(), None) + modules.clip_grad_value_(self.net_g.parameters(), None) self.optim_d.step() schedulers = self.lr_schedulers() diff --git a/nemo/collections/tts/modules/monotonic_align/__init__.py b/nemo/collections/tts/modules/monotonic_align/__init__.py new file mode 100644 index 000000000000..3d7009c40fea --- /dev/null +++ b/nemo/collections/tts/modules/monotonic_align/__init__.py @@ -0,0 +1,19 @@ +import numpy as np +import torch +from .monotonic_align.core import maximum_path_c + + +def maximum_path(neg_cent, mask): + """ Cython optimized version. 
+ neg_cent: [b, t_t, t_s] + mask: [b, t_t, t_s] + """ + device = neg_cent.device + dtype = neg_cent.dtype + neg_cent = neg_cent.data.cpu().numpy().astype(np.float32) + path = np.zeros(neg_cent.shape, dtype=np.int32) + + t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32) + t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32) + maximum_path_c(path, neg_cent, t_t_max, t_s_max) + return torch.from_numpy(path).to(device=device, dtype=dtype) diff --git a/nemo/collections/tts/modules/monotonic_align/core.c b/nemo/collections/tts/modules/monotonic_align/core.c new file mode 100644 index 000000000000..5631d20a9a00 --- /dev/null +++ b/nemo/collections/tts/modules/monotonic_align/core.c @@ -0,0 +1,21299 @@ +/* Generated by Cython 0.29.21 */ + +/* BEGIN: Cython Metadata +{ + "distutils": { + "name": "monotonic_align.core", + "sources": [ + "core.pyx" + ] + }, + "module_name": "monotonic_align.core" +} +END: Cython Metadata */ + +#define PY_SSIZE_T_CLEAN +#include "Python.h" +#ifndef Py_PYTHON_H + #error Python headers needed to compile C extensions, please install development version of Python. +#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) + #error Cython requires Python 2.6+ or Python 3.3+. +#else +#define CYTHON_ABI "0_29_21" +#define CYTHON_HEX_VERSION 0x001D15F0 +#define CYTHON_FUTURE_DIVISION 0 +#include +#ifndef offsetof + #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) +#endif +#if !defined(WIN32) && !defined(MS_WINDOWS) + #ifndef __stdcall + #define __stdcall + #endif + #ifndef __cdecl + #define __cdecl + #endif + #ifndef __fastcall + #define __fastcall + #endif +#endif +#ifndef DL_IMPORT + #define DL_IMPORT(t) t +#endif +#ifndef DL_EXPORT + #define DL_EXPORT(t) t +#endif +#define __PYX_COMMA , +#ifndef HAVE_LONG_LONG + #if PY_VERSION_HEX >= 0x02070000 + #define HAVE_LONG_LONG + #endif +#endif +#ifndef PY_LONG_LONG + #define PY_LONG_LONG LONG_LONG +#endif +#ifndef Py_HUGE_VAL + #define Py_HUGE_VAL HUGE_VAL +#endif +#ifdef PYPY_VERSION + #define CYTHON_COMPILING_IN_PYPY 1 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #undef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 0 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #if PY_VERSION_HEX < 0x03050000 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #undef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #undef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 1 + #undef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 0 + #undef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 0 + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 +#elif defined(PYSTON_VERSION) + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 1 + 
#define CYTHON_COMPILING_IN_CPYTHON 0 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 +#else + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 1 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) + #define CYTHON_USE_PYTYPE_LOOKUP 1 + #endif + #if PY_MAJOR_VERSION < 3 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #elif !defined(CYTHON_USE_PYLONG_INTERNALS) + #define CYTHON_USE_PYLONG_INTERNALS 1 + #endif + #ifndef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 1 + #endif + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #if PY_VERSION_HEX < 0x030300F0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #elif !defined(CYTHON_USE_UNICODE_WRITER) + #define CYTHON_USE_UNICODE_WRITER 1 + #endif + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #ifndef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 1 + #endif + #ifndef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 1 + #endif + #ifndef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) + #endif + #ifndef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) + #endif + #ifndef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) + #endif + #ifndef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) + #endif +#endif +#if !defined(CYTHON_FAST_PYCCALL) +#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) +#endif +#if CYTHON_USE_PYLONG_INTERNALS + #include "longintrepr.h" + #undef SHIFT + #undef BASE + #undef MASK + #ifdef SIZEOF_VOID_P + enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == 
sizeof(void*)) };
+  #endif
+#endif
+#ifndef __has_attribute
+  #define __has_attribute(x) 0
+#endif
+#ifndef __has_cpp_attribute
+  #define __has_cpp_attribute(x) 0
+#endif
+#ifndef CYTHON_RESTRICT
+  #if defined(__GNUC__)
+    #define CYTHON_RESTRICT __restrict__
+  #elif defined(_MSC_VER) && _MSC_VER >= 1400
+    #define CYTHON_RESTRICT __restrict
+  #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+    #define CYTHON_RESTRICT restrict
+  #else
+    #define CYTHON_RESTRICT
+  #endif
+#endif
+#ifndef CYTHON_UNUSED
+# if defined(__GNUC__)
+#   if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
+#     define CYTHON_UNUSED __attribute__ ((__unused__))
+#   else
+#     define CYTHON_UNUSED
+#   endif
+# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
+#   define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+#   define CYTHON_UNUSED
+# endif
+#endif
+#ifndef CYTHON_MAYBE_UNUSED_VAR
+#  if defined(__cplusplus)
+     template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
+#  else
+#    define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
+#  endif
+#endif
+#ifndef CYTHON_NCP_UNUSED
+# if CYTHON_COMPILING_IN_CPYTHON
+#  define CYTHON_NCP_UNUSED
+# else
+#  define CYTHON_NCP_UNUSED CYTHON_UNUSED
+# endif
+#endif
+#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
+#ifdef _MSC_VER
+    #ifndef _MSC_STDINT_H_
+        #if _MSC_VER < 1300
+           typedef unsigned char     uint8_t;
+           typedef unsigned int      uint32_t;
+        #else
+           typedef unsigned __int8   uint8_t;
+           typedef unsigned __int32  uint32_t;
+        #endif
+    #endif
+#else
+   #include <stdint.h>
+#endif
+#ifndef CYTHON_FALLTHROUGH
+  #if defined(__cplusplus) && __cplusplus >= 201103L
+    #if __has_cpp_attribute(fallthrough)
+      #define CYTHON_FALLTHROUGH [[fallthrough]]
+    #elif __has_cpp_attribute(clang::fallthrough)
+      #define CYTHON_FALLTHROUGH [[clang::fallthrough]]
+    #elif __has_cpp_attribute(gnu::fallthrough)
+      #define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
+    #endif
+  #endif
+  #ifndef CYTHON_FALLTHROUGH
+    #if __has_attribute(fallthrough)
+      #define CYTHON_FALLTHROUGH __attribute__((fallthrough))
+    #else
+      #define CYTHON_FALLTHROUGH
+    #endif
+  #endif
+  #if defined(__clang__ ) && defined(__apple_build_version__)
+    #if __apple_build_version__ < 7000000
+      #undef  CYTHON_FALLTHROUGH
+      #define CYTHON_FALLTHROUGH
+    #endif
+  #endif
+#endif
+
+#ifndef CYTHON_INLINE
+  #if defined(__clang__)
+    #define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
+  #elif defined(__GNUC__)
+    #define CYTHON_INLINE __inline__
+  #elif defined(_MSC_VER)
+    #define CYTHON_INLINE __inline
+  #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+    #define CYTHON_INLINE inline
+  #else
+    #define CYTHON_INLINE
+  #endif
+#endif
+
+#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
+  #define Py_OptimizeFlag 0
+#endif
+#define __PYX_BUILD_PY_SSIZE_T "n"
+#define CYTHON_FORMAT_SSIZE_T "z"
+#if PY_MAJOR_VERSION < 3
+  #define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
+  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
+          PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+  #define __Pyx_DefaultClassType PyClass_Type
+#else
+  #define __Pyx_BUILTIN_MODULE_NAME "builtins"
+#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2
+  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
+          PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+#else
+  #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v,
fv, cell, fn, name, fline, lnos)\ + PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) +#endif + #define __Pyx_DefaultClassType PyType_Type +#endif +#ifndef Py_TPFLAGS_CHECKTYPES + #define Py_TPFLAGS_CHECKTYPES 0 +#endif +#ifndef Py_TPFLAGS_HAVE_INDEX + #define Py_TPFLAGS_HAVE_INDEX 0 +#endif +#ifndef Py_TPFLAGS_HAVE_NEWBUFFER + #define Py_TPFLAGS_HAVE_NEWBUFFER 0 +#endif +#ifndef Py_TPFLAGS_HAVE_FINALIZE + #define Py_TPFLAGS_HAVE_FINALIZE 0 +#endif +#ifndef METH_STACKLESS + #define METH_STACKLESS 0 +#endif +#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) + #ifndef METH_FASTCALL + #define METH_FASTCALL 0x80 + #endif + typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); + typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, + Py_ssize_t nargs, PyObject *kwnames); +#else + #define __Pyx_PyCFunctionFast _PyCFunctionFast + #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords +#endif +#if CYTHON_FAST_PYCCALL +#define __Pyx_PyFastCFunction_Check(func)\ + ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) +#else +#define __Pyx_PyFastCFunction_Check(func) 0 +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) + #define PyObject_Malloc(s) PyMem_Malloc(s) + #define PyObject_Free(p) PyMem_Free(p) + #define PyObject_Realloc(p) PyMem_Realloc(p) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 + #define PyMem_RawMalloc(n) PyMem_Malloc(n) + #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) + #define PyMem_RawFree(p) PyMem_Free(p) +#endif +#if CYTHON_COMPILING_IN_PYSTON + #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) +#else + #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) +#endif +#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#elif PY_VERSION_HEX >= 0x03060000 + #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() +#elif PY_VERSION_HEX >= 0x03000000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#else + #define __Pyx_PyThreadState_Current _PyThreadState_Current +#endif +#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) +#include "pythread.h" +#define Py_tss_NEEDS_INIT 0 +typedef int Py_tss_t; +static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { + *key = PyThread_create_key(); + return 0; +} +static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { + Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); + *key = Py_tss_NEEDS_INIT; + return key; +} +static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { + PyObject_Free(key); +} +static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { + return *key != Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { + PyThread_delete_key(*key); + *key = Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { + return PyThread_set_key_value(*key, value); +} +static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { + return PyThread_get_key_value(*key); +} +#endif +#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) 
+#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) +#else +#define __Pyx_PyDict_NewPresized(n) PyDict_New() +#endif +#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION + #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) +#else + #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS +#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) +#else +#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) +#endif +#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) + #define CYTHON_PEP393_ENABLED 1 + #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ + 0 : _PyUnicode_Ready((PyObject *)(op))) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) + #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) + #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) + #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) + #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) + #else + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) + #endif +#else + #define CYTHON_PEP393_ENABLED 0 + #define PyUnicode_1BYTE_KIND 1 + #define PyUnicode_2BYTE_KIND 2 + #define PyUnicode_4BYTE_KIND 4 + #define __Pyx_PyUnicode_READY(op) (0) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) + #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) + #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) + #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) +#endif +#if CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) +#else + #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ + PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) + #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) + #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) + #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) +#endif +#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) +#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) +#else + #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) +#endif +#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) + #define PyObject_ASCII(o) PyObject_Repr(o) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBaseString_Type PyUnicode_Type + #define PyStringObject PyUnicodeObject + #define PyString_Type PyUnicode_Type + #define PyString_Check PyUnicode_Check + #define PyString_CheckExact PyUnicode_CheckExact +#ifndef PyObject_Unicode + #define PyObject_Unicode PyObject_Str +#endif +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) + #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) +#else + #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) + #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) +#endif +#ifndef PySet_CheckExact + #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) +#endif +#if PY_VERSION_HEX >= 0x030900A4 + #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) + #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) +#else + #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) + #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) +#endif +#if CYTHON_ASSUME_SAFE_MACROS + #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) +#else + #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyIntObject PyLongObject + #define PyInt_Type PyLong_Type + #define PyInt_Check(op) PyLong_Check(op) + #define PyInt_CheckExact(op) PyLong_CheckExact(op) + #define PyInt_FromString PyLong_FromString + #define PyInt_FromUnicode PyLong_FromUnicode + #define PyInt_FromLong PyLong_FromLong + #define PyInt_FromSize_t PyLong_FromSize_t + #define PyInt_FromSsize_t PyLong_FromSsize_t + #define PyInt_AsLong PyLong_AsLong + #define PyInt_AS_LONG PyLong_AS_LONG + #define PyInt_AsSsize_t PyLong_AsSsize_t + #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask + #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask + #define PyNumber_Int PyNumber_Long +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBoolObject PyLongObject +#endif +#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY + #ifndef PyUnicode_InternFromString + #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) + #endif +#endif +#if PY_VERSION_HEX < 0x030200A4 + typedef long Py_hash_t; + #define __Pyx_PyInt_FromHash_t PyInt_FromLong + #define __Pyx_PyInt_AsHash_t PyInt_AsLong +#else + #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t + #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func))
+#else
+  #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
+#endif
+#if CYTHON_USE_ASYNC_SLOTS
+  #if PY_VERSION_HEX >= 0x030500B1
+    #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
+    #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
+  #else
+    #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
+  #endif
+#else
+  #define __Pyx_PyType_AsAsync(obj) NULL
+#endif
+#ifndef __Pyx_PyAsyncMethodsStruct
+    typedef struct {
+        unaryfunc am_await;
+        unaryfunc am_aiter;
+        unaryfunc am_anext;
+    } __Pyx_PyAsyncMethodsStruct;
+#endif
+
+#if defined(WIN32) || defined(MS_WINDOWS)
+  #define _USE_MATH_DEFINES
+#endif
+#include <math.h>
+#ifdef NAN
+#define __PYX_NAN() ((float) NAN)
+#else
+static CYTHON_INLINE float __PYX_NAN() {
+  float value;
+  memset(&value, 0xFF, sizeof(value));
+  return value;
+}
+#endif
+#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
+#define __Pyx_truncl trunc
+#else
+#define __Pyx_truncl truncl
+#endif
+
+#define __PYX_MARK_ERR_POS(f_index, lineno) \
+    { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; }
+#define __PYX_ERR(f_index, lineno, Ln_error) \
+    { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }
+
+#ifndef __PYX_EXTERN_C
+  #ifdef __cplusplus
+    #define __PYX_EXTERN_C extern "C"
+  #else
+    #define __PYX_EXTERN_C extern
+  #endif
+#endif
+
+#define __PYX_HAVE__monotonic_align__core
+#define __PYX_HAVE_API__monotonic_align__core
+/* Early includes */
+#include "pythread.h"
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include "pystate.h"
+#ifdef _OPENMP
+#include <omp.h>
+#endif /* _OPENMP */
+
+#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
+#define CYTHON_WITHOUT_ASSERTIONS
+#endif
+
+typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
+                const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
+
+#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
+#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
+#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
+#define __PYX_DEFAULT_STRING_ENCODING ""
+#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
+#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
+#define __Pyx_uchar_cast(c) ((unsigned char)c)
+#define __Pyx_long_cast(x) ((long)x)
+#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
+    (sizeof(type) < sizeof(Py_ssize_t))  ||\
+    (sizeof(type) > sizeof(Py_ssize_t) &&\
+          likely(v < (type)PY_SSIZE_T_MAX ||\
+                 v == (type)PY_SSIZE_T_MAX)  &&\
+          (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
+                                v == (type)PY_SSIZE_T_MIN)))  ||\
+    (sizeof(type) == sizeof(Py_ssize_t) &&\
+          (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
+                               v == (type)PY_SSIZE_T_MAX)))  )
+static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
+    return (size_t) i < (size_t) limit;
+}
+#if defined (__cplusplus) && __cplusplus >= 201103L
+    #include <cstdlib>
+    #define __Pyx_sst_abs(value) std::abs(value)
+#elif SIZEOF_INT >= SIZEOF_SIZE_T
+    #define __Pyx_sst_abs(value) abs(value)
+#elif SIZEOF_LONG >= SIZEOF_SIZE_T
+    #define __Pyx_sst_abs(value) labs(value)
+#elif defined (_MSC_VER)
+    #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
+#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+    #define __Pyx_sst_abs(value) llabs(value)
+#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value) +#else + #define __Pyx_sst_abs(value) ((value<0) ? -value : value) +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); +#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) +#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) +#define __Pyx_PyBytes_FromString PyBytes_FromString +#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); +#if PY_MAJOR_VERSION < 3 + #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#else + #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize +#endif +#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) +#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) +#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) +#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) +#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) +static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { + const Py_UNICODE *u_end = u; + while (*u_end++) ; + return (size_t)(u_end - u - 1); +} +#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) +#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode +#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode +#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) +#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); +static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); +#define __Pyx_PySequence_Tuple(obj)\ + (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); +#if CYTHON_ASSUME_SAFE_MACROS +#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? 
PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) +#else +#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) +#endif +#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) +#else +#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) +#endif +#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII +static int __Pyx_sys_getdefaultencoding_not_ascii; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + PyObject* ascii_chars_u = NULL; + PyObject* ascii_chars_b = NULL; + const char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + if (strcmp(default_encoding_c, "ascii") == 0) { + __Pyx_sys_getdefaultencoding_not_ascii = 0; + } else { + char ascii_chars[128]; + int c; + for (c = 0; c < 128; c++) { + ascii_chars[c] = c; + } + __Pyx_sys_getdefaultencoding_not_ascii = 1; + ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); + if (!ascii_chars_u) goto bad; + ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); + if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { + PyErr_Format( + PyExc_ValueError, + "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", + default_encoding_c); + goto bad; + } + Py_DECREF(ascii_chars_u); + Py_DECREF(ascii_chars_b); + } + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + Py_XDECREF(ascii_chars_u); + Py_XDECREF(ascii_chars_b); + return -1; +} +#endif +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) +#else +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +static char* __PYX_DEFAULT_STRING_ENCODING; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); + if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; + strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + return -1; +} +#endif +#endif + + +/* Test for GCC > 2.95 */ +#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) + #define likely(x) __builtin_expect(!!(x), 1) + #define unlikely(x) __builtin_expect(!!(x), 0) +#else /* !__GNUC__ or GCC < 2.95 */ + #define likely(x) (x) + #define unlikely(x) (x) +#endif /* __GNUC__ */ 
+static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
+
+static PyObject *__pyx_m = NULL;
+static PyObject *__pyx_d;
+static PyObject *__pyx_b;
+static PyObject *__pyx_cython_runtime = NULL;
+static PyObject *__pyx_empty_tuple;
+static PyObject *__pyx_empty_bytes;
+static PyObject *__pyx_empty_unicode;
+static int __pyx_lineno;
+static int __pyx_clineno = 0;
+static const char * __pyx_cfilenm= __FILE__;
+static const char *__pyx_filename;
+
+
+static const char *__pyx_f[] = {
+  "core.pyx",
+  "stringsource",
+};
+/* NoFastGil.proto */
+#define __Pyx_PyGILState_Ensure PyGILState_Ensure
+#define __Pyx_PyGILState_Release PyGILState_Release
+#define __Pyx_FastGIL_Remember()
+#define __Pyx_FastGIL_Forget()
+#define __Pyx_FastGilFuncInit()
+
+/* MemviewSliceStruct.proto */
+struct __pyx_memoryview_obj;
+typedef struct {
+  struct __pyx_memoryview_obj *memview;
+  char *data;
+  Py_ssize_t shape[8];
+  Py_ssize_t strides[8];
+  Py_ssize_t suboffsets[8];
+} __Pyx_memviewslice;
+#define __Pyx_MemoryView_Len(m)  (m.shape[0])
+
+/* Atomics.proto */
+#include <pythread.h>
+#ifndef CYTHON_ATOMICS
+    #define CYTHON_ATOMICS 1
+#endif
+#define __pyx_atomic_int_type int
+#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\
+                    (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\
+                    !defined(__i386__)
+    #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
+    #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
+    #ifdef __PYX_DEBUG_ATOMICS
+        #warning "Using GNU atomics"
+    #endif
+#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0
+    #include <Windows.h>
+    #undef __pyx_atomic_int_type
+    #define __pyx_atomic_int_type LONG
+    #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
+    #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
+    #ifdef __PYX_DEBUG_ATOMICS
+        #pragma message ("Using MSVC atomics")
+    #endif
+#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
+    #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
+    #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
+    #ifdef __PYX_DEBUG_ATOMICS
+        #warning "Using Intel atomics"
+    #endif
+#else
+    #undef CYTHON_ATOMICS
+    #define CYTHON_ATOMICS 0
+    #ifdef __PYX_DEBUG_ATOMICS
+        #warning "Not using atomics"
+    #endif
+#endif
+typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
+#if CYTHON_ATOMICS
+    #define __pyx_add_acquisition_count(memview)\
+             __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
+    #define __pyx_sub_acquisition_count(memview)\
+            __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
+#else
+    #define __pyx_add_acquisition_count(memview)\
+             __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
+    #define __pyx_sub_acquisition_count(memview)\
+            __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
+#endif
+
+/* ForceInitThreads.proto */
+#ifndef __PYX_FORCE_INIT_THREADS
+  #define __PYX_FORCE_INIT_THREADS 0
+#endif
+
+/* BufferFormatStructs.proto */
+#define IS_UNSIGNED(type) (((type) -1) > 0)
+struct __Pyx_StructField_;
+#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
+typedef struct {
+  const char* name;
+  struct __Pyx_StructField_* fields;
+  size_t size;
+  size_t arraysize[8];
+  int ndim;
+  char typegroup;
+  char is_unsigned;
+  int flags;
+} __Pyx_TypeInfo;
+typedef struct __Pyx_StructField_ {
+  __Pyx_TypeInfo* type;
+  const char* name;
+  size_t offset;
+}
__Pyx_StructField; +typedef struct { + __Pyx_StructField* field; + size_t parent_offset; +} __Pyx_BufFmt_StackElem; +typedef struct { + __Pyx_StructField root; + __Pyx_BufFmt_StackElem* head; + size_t fmt_offset; + size_t new_count, enc_count; + size_t struct_alignment; + int is_complex; + char enc_type; + char new_packmode; + char enc_packmode; + char is_valid_array; +} __Pyx_BufFmt_Context; + + +/*--- Type declarations ---*/ +struct __pyx_array_obj; +struct __pyx_MemviewEnum_obj; +struct __pyx_memoryview_obj; +struct __pyx_memoryviewslice_obj; +struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each; + +/* "monotonic_align/core.pyx":7 + * @cython.boundscheck(False) + * @cython.wraparound(False) + * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< + * cdef int x + * cdef int y + */ +struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each { + int __pyx_n; + float max_neg_val; +}; + +/* "View.MemoryView":105 + * + * @cname("__pyx_array") + * cdef class array: # <<<<<<<<<<<<<< + * + * cdef: + */ +struct __pyx_array_obj { + PyObject_HEAD + struct __pyx_vtabstruct_array *__pyx_vtab; + char *data; + Py_ssize_t len; + char *format; + int ndim; + Py_ssize_t *_shape; + Py_ssize_t *_strides; + Py_ssize_t itemsize; + PyObject *mode; + PyObject *_format; + void (*callback_free_data)(void *); + int free_data; + int dtype_is_object; +}; + + +/* "View.MemoryView":279 + * + * @cname('__pyx_MemviewEnum') + * cdef class Enum(object): # <<<<<<<<<<<<<< + * cdef object name + * def __init__(self, name): + */ +struct __pyx_MemviewEnum_obj { + PyObject_HEAD + PyObject *name; +}; + + +/* "View.MemoryView":330 + * + * @cname('__pyx_memoryview') + * cdef class memoryview(object): # <<<<<<<<<<<<<< + * + * cdef object obj + */ +struct __pyx_memoryview_obj { + PyObject_HEAD + struct __pyx_vtabstruct_memoryview *__pyx_vtab; + PyObject *obj; + PyObject *_size; + PyObject *_array_interface; + PyThread_type_lock lock; + __pyx_atomic_int acquisition_count[2]; + __pyx_atomic_int *acquisition_count_aligned_p; + Py_buffer view; + int flags; + int dtype_is_object; + __Pyx_TypeInfo *typeinfo; +}; + + +/* "View.MemoryView":965 + * + * @cname('__pyx_memoryviewslice') + * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< + * "Internal class for passing memoryview slices to Python" + * + */ +struct __pyx_memoryviewslice_obj { + struct __pyx_memoryview_obj __pyx_base; + __Pyx_memviewslice from_slice; + PyObject *from_object; + PyObject *(*to_object_func)(char *); + int (*to_dtype_func)(char *, PyObject *); +}; + + + +/* "View.MemoryView":105 + * + * @cname("__pyx_array") + * cdef class array: # <<<<<<<<<<<<<< + * + * cdef: + */ + +struct __pyx_vtabstruct_array { + PyObject *(*get_memview)(struct __pyx_array_obj *); +}; +static struct __pyx_vtabstruct_array *__pyx_vtabptr_array; + + +/* "View.MemoryView":330 + * + * @cname('__pyx_memoryview') + * cdef class memoryview(object): # <<<<<<<<<<<<<< + * + * cdef object obj + */ + +struct __pyx_vtabstruct_memoryview { + char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); + PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); + PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); + PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); + PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); + PyObject 
*(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); + PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); +}; +static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; + + +/* "View.MemoryView":965 + * + * @cname('__pyx_memoryviewslice') + * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< + * "Internal class for passing memoryview slices to Python" + * + */ + +struct __pyx_vtabstruct__memoryviewslice { + struct __pyx_vtabstruct_memoryview __pyx_base; +}; +static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; + +/* --- Runtime support code (head) --- */ +/* Refnanny.proto */ +#ifndef CYTHON_REFNANNY + #define CYTHON_REFNANNY 0 +#endif +#if CYTHON_REFNANNY + typedef struct { + void (*INCREF)(void*, PyObject*, int); + void (*DECREF)(void*, PyObject*, int); + void (*GOTREF)(void*, PyObject*, int); + void (*GIVEREF)(void*, PyObject*, int); + void* (*SetupContext)(const char*, int, const char*); + void (*FinishContext)(void**); + } __Pyx_RefNannyAPIStruct; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); + #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; +#ifdef WITH_THREAD + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + if (acquire_gil) {\ + PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + PyGILState_Release(__pyx_gilstate_save);\ + } else {\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + } +#else + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) +#endif + #define __Pyx_RefNannyFinishContext()\ + __Pyx_RefNanny->FinishContext(&__pyx_refnanny) + #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) + #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) + #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) + #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) +#else + #define __Pyx_RefNannyDeclarations + #define __Pyx_RefNannySetupContext(name, acquire_gil) + #define __Pyx_RefNannyFinishContext() + #define __Pyx_INCREF(r) Py_INCREF(r) + #define __Pyx_DECREF(r) Py_DECREF(r) + #define __Pyx_GOTREF(r) + #define __Pyx_GIVEREF(r) + #define __Pyx_XINCREF(r) Py_XINCREF(r) + #define __Pyx_XDECREF(r) Py_XDECREF(r) + #define __Pyx_XGOTREF(r) + #define __Pyx_XGIVEREF(r) +#endif +#define __Pyx_XDECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_XDECREF(tmp);\ + } while (0) +#define __Pyx_DECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_DECREF(tmp);\ + } while (0) +#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) +#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) + +/* PyObjectGetAttrStr.proto */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* 
__Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) +#endif + +/* GetBuiltinName.proto */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name); + +/* MemviewSliceInit.proto */ +#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d +#define __Pyx_MEMVIEW_DIRECT 1 +#define __Pyx_MEMVIEW_PTR 2 +#define __Pyx_MEMVIEW_FULL 4 +#define __Pyx_MEMVIEW_CONTIG 8 +#define __Pyx_MEMVIEW_STRIDED 16 +#define __Pyx_MEMVIEW_FOLLOW 32 +#define __Pyx_IS_C_CONTIG 1 +#define __Pyx_IS_F_CONTIG 2 +static int __Pyx_init_memviewslice( + struct __pyx_memoryview_obj *memview, + int ndim, + __Pyx_memviewslice *memviewslice, + int memview_is_new_reference); +static CYTHON_INLINE int __pyx_add_acquisition_count_locked( + __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); +static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( + __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); +#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) +#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) +#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) +#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) +static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); +static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); + +/* RaiseArgTupleInvalid.proto */ +static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, + Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); + +/* RaiseDoubleKeywords.proto */ +static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); + +/* ParseKeywords.proto */ +static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ + PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ + const char* function_name); + +/* None.proto */ +static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); + +/* ArgTypeTest.proto */ +#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ + ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 
1 :\ + __Pyx__ArgTypeTest(obj, type, name, exact)) +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); + +/* PyObjectCall.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); +#else +#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) +#endif + +/* PyThreadStateGet.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; +#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; +#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type +#else +#define __Pyx_PyThreadState_declare +#define __Pyx_PyThreadState_assign +#define __Pyx_PyErr_Occurred() PyErr_Occurred() +#endif + +/* PyErrFetchRestore.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) +#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) +#else +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#endif +#else +#define __Pyx_PyErr_Clear() PyErr_Clear() +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) +#endif + +/* RaiseException.proto */ +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); + +/* PyCFunctionFastCall.proto */ +#if CYTHON_FAST_PYCCALL +static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); +#else +#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) +#endif + +/* PyFunctionFastCall.proto */ +#if CYTHON_FAST_PYCALL +#define __Pyx_PyFunction_FastCall(func, args, nargs)\ + __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) +#if 1 || PY_VERSION_HEX < 0x030600B1 +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); +#else +#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) +#endif +#define __Pyx_BUILD_ASSERT_EXPR(cond)\ + (sizeof(char [1 - 2*!(cond)]) - 1) +#ifndef Py_MEMBER_SIZE +#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) +#endif + static size_t __pyx_pyframe_localsplus_offset = 0; + #include "frameobject.h" + #define __Pxy_PyFrame_Initialize_Offsets()\ + 
((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
+   (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
+  #define __Pyx_PyFrame_GetLocalsplus(frame)\
+    (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
+#endif
+
+/* PyObjectCall2Args.proto */
+static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2);
+
+/* PyObjectCallMethO.proto */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
+#endif
+
+/* PyObjectCallOneArg.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
+
+/* IncludeStringH.proto */
+#include <string.h>
+
+/* BytesEquals.proto */
+static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);
+
+/* UnicodeEquals.proto */
+static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);
+
+/* StrEquals.proto */
+#if PY_MAJOR_VERSION >= 3
+#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
+#else
+#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
+#endif
+
+/* None.proto */
+static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t);
+
+/* UnaryNegOverflows.proto */
+#define UNARY_NEG_WOULD_OVERFLOW(x)\
+        (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
+
+static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
+static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/
+/* GetAttr.proto */
+static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
+
+/* GetItemInt.proto */
+#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
+    (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
+    __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
+    (is_list ?
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ + __Pyx_GetItemInt_Generic(o, to_py_func(i)))) +#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ + (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, + int wraparound, int boundscheck); +#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ + (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, + int wraparound, int boundscheck); +static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, + int is_list, int wraparound, int boundscheck); + +/* ObjectGetItem.proto */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); +#else +#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) +#endif + +/* decode_c_string_utf16.proto */ +static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) { + int byteorder = 0; + return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); +} +static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) { + int byteorder = -1; + return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); +} +static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) { + int byteorder = 1; + return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); +} + +/* decode_c_string.proto */ +static CYTHON_INLINE PyObject* __Pyx_decode_c_string( + const char* cstring, Py_ssize_t start, Py_ssize_t stop, + const char* encoding, const char* errors, + PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); + +/* PyErrExceptionMatches.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) +static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); +#else +#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) +#endif + +/* GetAttr3.proto */ +static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); + +/* PyDictVersioning.proto */ +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) +#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ + (version_var) = __PYX_GET_DICT_VERSION(dict);\ + (cache_var) = (value); +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ + static PY_UINT64_T __pyx_dict_version = 0;\ + static PyObject *__pyx_dict_cached_value = NULL;\ + if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ + (VAR) = __pyx_dict_cached_value;\ + } else {\ + (VAR) = __pyx_dict_cached_value = (LOOKUP);\ + __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ + }\ +} +static 
CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); +#else +#define __PYX_GET_DICT_VERSION(dict) (0) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); +#endif + +/* GetModuleGlobalName.proto */ +#if CYTHON_USE_DICT_VERSIONS +#define __Pyx_GetModuleGlobalName(var, name) {\ + static PY_UINT64_T __pyx_dict_version = 0;\ + static PyObject *__pyx_dict_cached_value = NULL;\ + (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ + (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ + __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ +} +#define __Pyx_GetModuleGlobalNameUncached(var, name) {\ + PY_UINT64_T __pyx_dict_version;\ + PyObject *__pyx_dict_cached_value;\ + (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ +} +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); +#else +#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) +#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); +#endif + +/* RaiseTooManyValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); + +/* RaiseNeedMoreValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); + +/* RaiseNoneIterError.proto */ +static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); + +/* ExtTypeTest.proto */ +static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); + +/* GetTopmostException.proto */ +#if CYTHON_USE_EXC_INFO_STACK +static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); +#endif + +/* SaveResetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +#else +#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) +#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) +#endif + +/* GetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); +#endif + +/* SwapException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#else +static CYTHON_INLINE void 
__Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); +#endif + +/* Import.proto */ +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); + +/* FastTypeChecks.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); +#else +#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) +#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) +#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) +#endif +#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) + +static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ +/* ListCompAppend.proto */ +#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS +static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { + PyListObject* L = (PyListObject*) list; + Py_ssize_t len = Py_SIZE(list); + if (likely(L->allocated > len)) { + Py_INCREF(x); + PyList_SET_ITEM(list, len, x); + __Pyx_SET_SIZE(list, len + 1); + return 0; + } + return PyList_Append(list, x); +} +#else +#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) +#endif + +/* PyIntBinop.proto */ +#if !CYTHON_COMPILING_IN_PYPY +static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); +#else +#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\ + (inplace ? 
PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) +#endif + +/* ListExtend.proto */ +static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { +#if CYTHON_COMPILING_IN_CPYTHON + PyObject* none = _PyList_Extend((PyListObject*)L, v); + if (unlikely(!none)) + return -1; + Py_DECREF(none); + return 0; +#else + return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); +#endif +} + +/* ListAppend.proto */ +#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS +static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { + PyListObject* L = (PyListObject*) list; + Py_ssize_t len = Py_SIZE(list); + if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { + Py_INCREF(x); + PyList_SET_ITEM(list, len, x); + __Pyx_SET_SIZE(list, len + 1); + return 0; + } + return PyList_Append(list, x); +} +#else +#define __Pyx_PyList_Append(L,x) PyList_Append(L,x) +#endif + +/* None.proto */ +static CYTHON_INLINE long __Pyx_div_long(long, long); + +/* ImportFrom.proto */ +static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); + +/* HasAttr.proto */ +static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); + +/* PyObject_GenericGetAttrNoDict.proto */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr +#endif + +/* PyObject_GenericGetAttr.proto */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr +#endif + +/* SetVTable.proto */ +static int __Pyx_SetVtable(PyObject *dict, void *vtable); + +/* PyObjectGetAttrStrNoError.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name); + +/* SetupReduce.proto */ +static int __Pyx_setup_reduce(PyObject* type_obj); + +/* CLineInTraceback.proto */ +#ifdef CYTHON_CLINE_IN_TRACEBACK +#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0) +#else +static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); +#endif + +/* CodeObjectCache.proto */ +typedef struct { + PyCodeObject* code_object; + int code_line; +} __Pyx_CodeObjectCacheEntry; +struct __Pyx_CodeObjectCache { + int count; + int max_count; + __Pyx_CodeObjectCacheEntry* entries; +}; +static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); +static PyCodeObject *__pyx_find_code_object(int code_line); +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); + +/* AddTraceback.proto */ +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename); + +#if PY_MAJOR_VERSION < 3 + static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); + static void __Pyx_ReleaseBuffer(Py_buffer *view); +#else + #define __Pyx_GetBuffer PyObject_GetBuffer + #define __Pyx_ReleaseBuffer PyBuffer_Release +#endif + + +/* BufferStructDeclare.proto */ +typedef struct { + Py_ssize_t shape, strides, suboffsets; +} __Pyx_Buf_DimInfo; +typedef struct { + size_t refcount; + Py_buffer pybuffer; +} __Pyx_Buffer; +typedef struct { + __Pyx_Buffer *rcbuffer; + char *data; + __Pyx_Buf_DimInfo diminfo[8]; +} __Pyx_LocalBuf_ND; + +/* MemviewSliceIsContig.proto */ +static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); + +/* OverlappingSlices.proto */ +static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, + __Pyx_memviewslice *slice2, + int ndim, size_t itemsize); + +/* Capsule.proto */ +static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); + +/* IsLittleEndian.proto */ +static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); + +/* BufferFormatCheck.proto */ +static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); +static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, + __Pyx_BufFmt_StackElem* stack, + __Pyx_TypeInfo* type); + +/* TypeInfoCompare.proto */ +static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); + +/* MemviewSliceValidateAndInit.proto */ +static int __Pyx_ValidateAndInit_memviewslice( + int *axes_specs, + int c_or_f_flag, + int buf_flags, + int ndim, + __Pyx_TypeInfo *dtype, + __Pyx_BufFmt_StackElem stack[], + __Pyx_memviewslice *memviewslice, + PyObject *original_obj); + +/* ObjectToMemviewSlice.proto */ +static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(PyObject *, int writable_flag); + +/* ObjectToMemviewSlice.proto */ +static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *, int writable_flag); + +/* ObjectToMemviewSlice.proto */ +static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *, int writable_flag); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); + +/* MemviewSliceCopyTemplate.proto */ +static __Pyx_memviewslice +__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, + const char *mode, int ndim, + size_t sizeof_dtype, int contig_flag, + int dtype_is_object); + +/* CIntFromPy.proto */ +static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); + +/* CIntFromPy.proto */ +static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); + +/* CIntFromPy.proto */ +static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject 
*); + +/* CheckBinaryVersion.proto */ +static int __Pyx_check_binary_version(void); + +/* InitStrings.proto */ +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); + +static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ +static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ +static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ +static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ +static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ +static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ +static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ +static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ +static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ +static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ + +/* Module declarations from 'cython.view' */ + +/* Module declarations from 'cython' */ + +/* Module declarations from 'monotonic_align.core' */ +static PyTypeObject *__pyx_array_type = 0; +static PyTypeObject *__pyx_MemviewEnum_type = 0; +static PyTypeObject *__pyx_memoryview_type = 0; +static PyTypeObject *__pyx_memoryviewslice_type = 0; +static PyObject *generic = 0; +static PyObject *strided = 0; +static PyObject *indirect = 0; +static PyObject *contiguous = 0; +static PyObject *indirect_contiguous = 0; +static int __pyx_memoryview_thread_locks_used; +static PyThread_type_lock __pyx_memoryview_thread_locks[8]; +static void __pyx_f_15monotonic_align_4core_maximum_path_each(__Pyx_memviewslice, __Pyx_memviewslice, int, int, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each *__pyx_optional_args); /*proto*/ +static void __pyx_f_15monotonic_align_4core_maximum_path_c(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/ +static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ +static void *__pyx_align_pointer(void *, size_t); /*proto*/ +static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ +static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ +static PyObject *_unellipsify(PyObject *, int); /*proto*/ +static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ +static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ +static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ +static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ +static int __pyx_memslice_transpose(__Pyx_memviewslice *); 
/*proto*/ +static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ +static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ +static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ +static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ +static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ +static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ +static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ +static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ +static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ +static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ +static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ +static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ +static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ +static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ +static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ +static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ +static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ +static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ +static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ +static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ +static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ +static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ +static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/ +static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, IS_UNSIGNED(int) ? 
'U' : 'I', IS_UNSIGNED(int), 0 }; +static __Pyx_TypeInfo __Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 }; +#define __Pyx_MODULE_NAME "monotonic_align.core" +extern int __pyx_module_is_main_monotonic_align__core; +int __pyx_module_is_main_monotonic_align__core = 0; + +/* Implementation of 'monotonic_align.core' */ +static PyObject *__pyx_builtin_range; +static PyObject *__pyx_builtin_ValueError; +static PyObject *__pyx_builtin_MemoryError; +static PyObject *__pyx_builtin_enumerate; +static PyObject *__pyx_builtin_TypeError; +static PyObject *__pyx_builtin_Ellipsis; +static PyObject *__pyx_builtin_id; +static PyObject *__pyx_builtin_IndexError; +static const char __pyx_k_O[] = "O"; +static const char __pyx_k_c[] = "c"; +static const char __pyx_k_id[] = "id"; +static const char __pyx_k_new[] = "__new__"; +static const char __pyx_k_obj[] = "obj"; +static const char __pyx_k_base[] = "base"; +static const char __pyx_k_dict[] = "__dict__"; +static const char __pyx_k_main[] = "__main__"; +static const char __pyx_k_mode[] = "mode"; +static const char __pyx_k_name[] = "name"; +static const char __pyx_k_ndim[] = "ndim"; +static const char __pyx_k_pack[] = "pack"; +static const char __pyx_k_size[] = "size"; +static const char __pyx_k_step[] = "step"; +static const char __pyx_k_stop[] = "stop"; +static const char __pyx_k_t_xs[] = "t_xs"; +static const char __pyx_k_t_ys[] = "t_ys"; +static const char __pyx_k_test[] = "__test__"; +static const char __pyx_k_ASCII[] = "ASCII"; +static const char __pyx_k_class[] = "__class__"; +static const char __pyx_k_error[] = "error"; +static const char __pyx_k_flags[] = "flags"; +static const char __pyx_k_paths[] = "paths"; +static const char __pyx_k_range[] = "range"; +static const char __pyx_k_shape[] = "shape"; +static const char __pyx_k_start[] = "start"; +static const char __pyx_k_encode[] = "encode"; +static const char __pyx_k_format[] = "format"; +static const char __pyx_k_import[] = "__import__"; +static const char __pyx_k_name_2[] = "__name__"; +static const char __pyx_k_pickle[] = "pickle"; +static const char __pyx_k_reduce[] = "__reduce__"; +static const char __pyx_k_struct[] = "struct"; +static const char __pyx_k_unpack[] = "unpack"; +static const char __pyx_k_update[] = "update"; +static const char __pyx_k_values[] = "values"; +static const char __pyx_k_fortran[] = "fortran"; +static const char __pyx_k_memview[] = "memview"; +static const char __pyx_k_Ellipsis[] = "Ellipsis"; +static const char __pyx_k_getstate[] = "__getstate__"; +static const char __pyx_k_itemsize[] = "itemsize"; +static const char __pyx_k_pyx_type[] = "__pyx_type"; +static const char __pyx_k_setstate[] = "__setstate__"; +static const char __pyx_k_TypeError[] = "TypeError"; +static const char __pyx_k_enumerate[] = "enumerate"; +static const char __pyx_k_pyx_state[] = "__pyx_state"; +static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; +static const char __pyx_k_IndexError[] = "IndexError"; +static const char __pyx_k_ValueError[] = "ValueError"; +static const char __pyx_k_pyx_result[] = "__pyx_result"; +static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; +static const char __pyx_k_MemoryError[] = "MemoryError"; +static const char __pyx_k_PickleError[] = "PickleError"; +static const char __pyx_k_pyx_checksum[] = "__pyx_checksum"; +static const char __pyx_k_stringsource[] = "stringsource"; +static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; +static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; +static const char 
__pyx_k_View_MemoryView[] = "View.MemoryView"; +static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; +static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; +static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; +static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; +static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum"; +static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; +static const char __pyx_k_strided_and_direct[] = "<strided and direct>"; +static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; +static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; +static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; +static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; +static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; +static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; +static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; +static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; +static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; +static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; +static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; +static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; +static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview"; +static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview"; +static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; +static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))"; +static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; +static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; +static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; +static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; +static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; +static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; +static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; +static PyObject *__pyx_n_s_ASCII; +static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; +static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; +static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor; +static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi; +static PyObject *__pyx_kp_s_Cannot_index_with_type_s; +static PyObject *__pyx_n_s_Ellipsis; +static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; +static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0; +static PyObject *__pyx_n_s_IndexError; +static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; +static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; +static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; +static PyObject *__pyx_n_s_MemoryError; +static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; +static PyObject 
*__pyx_kp_s_MemoryView_of_r_object; +static PyObject *__pyx_n_b_O; +static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; +static PyObject *__pyx_n_s_PickleError; +static PyObject *__pyx_n_s_TypeError; +static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; +static PyObject *__pyx_n_s_ValueError; +static PyObject *__pyx_n_s_View_MemoryView; +static PyObject *__pyx_n_s_allocate_buffer; +static PyObject *__pyx_n_s_base; +static PyObject *__pyx_n_s_c; +static PyObject *__pyx_n_u_c; +static PyObject *__pyx_n_s_class; +static PyObject *__pyx_n_s_cline_in_traceback; +static PyObject *__pyx_kp_s_contiguous_and_direct; +static PyObject *__pyx_kp_s_contiguous_and_indirect; +static PyObject *__pyx_n_s_dict; +static PyObject *__pyx_n_s_dtype_is_object; +static PyObject *__pyx_n_s_encode; +static PyObject *__pyx_n_s_enumerate; +static PyObject *__pyx_n_s_error; +static PyObject *__pyx_n_s_flags; +static PyObject *__pyx_n_s_format; +static PyObject *__pyx_n_s_fortran; +static PyObject *__pyx_n_u_fortran; +static PyObject *__pyx_n_s_getstate; +static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; +static PyObject *__pyx_n_s_id; +static PyObject *__pyx_n_s_import; +static PyObject *__pyx_n_s_itemsize; +static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; +static PyObject *__pyx_n_s_main; +static PyObject *__pyx_n_s_memview; +static PyObject *__pyx_n_s_mode; +static PyObject *__pyx_n_s_name; +static PyObject *__pyx_n_s_name_2; +static PyObject *__pyx_n_s_ndim; +static PyObject *__pyx_n_s_new; +static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; +static PyObject *__pyx_n_s_obj; +static PyObject *__pyx_n_s_pack; +static PyObject *__pyx_n_s_paths; +static PyObject *__pyx_n_s_pickle; +static PyObject *__pyx_n_s_pyx_PickleError; +static PyObject *__pyx_n_s_pyx_checksum; +static PyObject *__pyx_n_s_pyx_getbuffer; +static PyObject *__pyx_n_s_pyx_result; +static PyObject *__pyx_n_s_pyx_state; +static PyObject *__pyx_n_s_pyx_type; +static PyObject *__pyx_n_s_pyx_unpickle_Enum; +static PyObject *__pyx_n_s_pyx_vtable; +static PyObject *__pyx_n_s_range; +static PyObject *__pyx_n_s_reduce; +static PyObject *__pyx_n_s_reduce_cython; +static PyObject *__pyx_n_s_reduce_ex; +static PyObject *__pyx_n_s_setstate; +static PyObject *__pyx_n_s_setstate_cython; +static PyObject *__pyx_n_s_shape; +static PyObject *__pyx_n_s_size; +static PyObject *__pyx_n_s_start; +static PyObject *__pyx_n_s_step; +static PyObject *__pyx_n_s_stop; +static PyObject *__pyx_kp_s_strided_and_direct; +static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; +static PyObject *__pyx_kp_s_strided_and_indirect; +static PyObject *__pyx_kp_s_stringsource; +static PyObject *__pyx_n_s_struct; +static PyObject *__pyx_n_s_t_xs; +static PyObject *__pyx_n_s_t_ys; +static PyObject *__pyx_n_s_test; +static PyObject *__pyx_kp_s_unable_to_allocate_array_data; +static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; +static PyObject *__pyx_n_s_unpack; +static PyObject *__pyx_n_s_update; +static PyObject *__pyx_n_s_values; +static PyObject *__pyx_pf_15monotonic_align_4core_maximum_path_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs); /* proto */ +static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ 
+static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ +static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ +static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ +static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ +static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ +static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ +static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ +static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ +static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ +static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ +static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ +static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject 
*__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ +static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ +static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ +static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ +static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ +static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ +static PyObject *__pyx_int_0; +static PyObject *__pyx_int_1; +static PyObject *__pyx_int_184977713; +static PyObject *__pyx_int_neg_1; +static float __pyx_k_; +static PyObject *__pyx_tuple__2; +static PyObject *__pyx_tuple__3; +static PyObject *__pyx_tuple__4; +static PyObject *__pyx_tuple__5; +static PyObject *__pyx_tuple__6; +static PyObject *__pyx_tuple__7; +static PyObject *__pyx_tuple__8; +static PyObject *__pyx_tuple__9; +static PyObject *__pyx_slice__16; +static 
PyObject *__pyx_tuple__10; +static PyObject *__pyx_tuple__11; +static PyObject *__pyx_tuple__12; +static PyObject *__pyx_tuple__13; +static PyObject *__pyx_tuple__14; +static PyObject *__pyx_tuple__15; +static PyObject *__pyx_tuple__17; +static PyObject *__pyx_tuple__18; +static PyObject *__pyx_tuple__19; +static PyObject *__pyx_tuple__20; +static PyObject *__pyx_tuple__21; +static PyObject *__pyx_tuple__22; +static PyObject *__pyx_tuple__23; +static PyObject *__pyx_tuple__24; +static PyObject *__pyx_tuple__25; +static PyObject *__pyx_codeobj__26; +/* Late includes */ + +/* "monotonic_align/core.pyx":7 + * @cython.boundscheck(False) + * @cython.wraparound(False) + * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< + * cdef int x + * cdef int y + */ + +static void __pyx_f_15monotonic_align_4core_maximum_path_each(__Pyx_memviewslice __pyx_v_path, __Pyx_memviewslice __pyx_v_value, int __pyx_v_t_y, int __pyx_v_t_x, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each *__pyx_optional_args) { + float __pyx_v_max_neg_val = __pyx_k_; + int __pyx_v_x; + int __pyx_v_y; + float __pyx_v_v_prev; + float __pyx_v_v_cur; + int __pyx_v_index; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + long __pyx_t_4; + int __pyx_t_5; + long __pyx_t_6; + long __pyx_t_7; + int __pyx_t_8; + Py_ssize_t __pyx_t_9; + Py_ssize_t __pyx_t_10; + float __pyx_t_11; + float __pyx_t_12; + float __pyx_t_13; + int __pyx_t_14; + Py_ssize_t __pyx_t_15; + Py_ssize_t __pyx_t_16; + if (__pyx_optional_args) { + if (__pyx_optional_args->__pyx_n > 0) { + __pyx_v_max_neg_val = __pyx_optional_args->max_neg_val; + } + } + + /* "monotonic_align/core.pyx":13 + * cdef float v_cur + * cdef float tmp + * cdef int index = t_x - 1 # <<<<<<<<<<<<<< + * + * for y in range(t_y): + */ + __pyx_v_index = (__pyx_v_t_x - 1); + + /* "monotonic_align/core.pyx":15 + * cdef int index = t_x - 1 + * + * for y in range(t_y): # <<<<<<<<<<<<<< + * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): + * if x == y: + */ + __pyx_t_1 = __pyx_v_t_y; + __pyx_t_2 = __pyx_t_1; + for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { + __pyx_v_y = __pyx_t_3; + + /* "monotonic_align/core.pyx":16 + * + * for y in range(t_y): + * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): # <<<<<<<<<<<<<< + * if x == y: + * v_cur = max_neg_val + */ + __pyx_t_4 = (__pyx_v_y + 1); + __pyx_t_5 = __pyx_v_t_x; + if (((__pyx_t_4 < __pyx_t_5) != 0)) { + __pyx_t_6 = __pyx_t_4; + } else { + __pyx_t_6 = __pyx_t_5; + } + __pyx_t_4 = __pyx_t_6; + __pyx_t_5 = ((__pyx_v_t_x + __pyx_v_y) - __pyx_v_t_y); + __pyx_t_6 = 0; + if (((__pyx_t_5 > __pyx_t_6) != 0)) { + __pyx_t_7 = __pyx_t_5; + } else { + __pyx_t_7 = __pyx_t_6; + } + __pyx_t_6 = __pyx_t_4; + for (__pyx_t_5 = __pyx_t_7; __pyx_t_5 < __pyx_t_6; __pyx_t_5+=1) { + __pyx_v_x = __pyx_t_5; + + /* "monotonic_align/core.pyx":17 + * for y in range(t_y): + * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): + * if x == y: # <<<<<<<<<<<<<< + * v_cur = max_neg_val + * else: + */ + __pyx_t_8 = ((__pyx_v_x == __pyx_v_y) != 0); + if (__pyx_t_8) { + + /* "monotonic_align/core.pyx":18 + * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): + * if x == y: + * v_cur = max_neg_val # <<<<<<<<<<<<<< + * else: + * v_cur = value[y-1, x] + */ + __pyx_v_v_cur = __pyx_v_max_neg_val; + + /* "monotonic_align/core.pyx":17 + * for y in range(t_y): + * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): + * if x == y: # <<<<<<<<<<<<<< + * v_cur = 
max_neg_val + * else: + */ + goto __pyx_L7; + } + + /* "monotonic_align/core.pyx":20 + * v_cur = max_neg_val + * else: + * v_cur = value[y-1, x] # <<<<<<<<<<<<<< + * if x == 0: + * if y == 0: + */ + /*else*/ { + __pyx_t_9 = (__pyx_v_y - 1); + __pyx_t_10 = __pyx_v_x; + __pyx_v_v_cur = (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) ))); + } + __pyx_L7:; + + /* "monotonic_align/core.pyx":21 + * else: + * v_cur = value[y-1, x] + * if x == 0: # <<<<<<<<<<<<<< + * if y == 0: + * v_prev = 0. + */ + __pyx_t_8 = ((__pyx_v_x == 0) != 0); + if (__pyx_t_8) { + + /* "monotonic_align/core.pyx":22 + * v_cur = value[y-1, x] + * if x == 0: + * if y == 0: # <<<<<<<<<<<<<< + * v_prev = 0. + * else: + */ + __pyx_t_8 = ((__pyx_v_y == 0) != 0); + if (__pyx_t_8) { + + /* "monotonic_align/core.pyx":23 + * if x == 0: + * if y == 0: + * v_prev = 0. # <<<<<<<<<<<<<< + * else: + * v_prev = max_neg_val + */ + __pyx_v_v_prev = 0.; + + /* "monotonic_align/core.pyx":22 + * v_cur = value[y-1, x] + * if x == 0: + * if y == 0: # <<<<<<<<<<<<<< + * v_prev = 0. + * else: + */ + goto __pyx_L9; + } + + /* "monotonic_align/core.pyx":25 + * v_prev = 0. + * else: + * v_prev = max_neg_val # <<<<<<<<<<<<<< + * else: + * v_prev = value[y-1, x-1] + */ + /*else*/ { + __pyx_v_v_prev = __pyx_v_max_neg_val; + } + __pyx_L9:; + + /* "monotonic_align/core.pyx":21 + * else: + * v_cur = value[y-1, x] + * if x == 0: # <<<<<<<<<<<<<< + * if y == 0: + * v_prev = 0. + */ + goto __pyx_L8; + } + + /* "monotonic_align/core.pyx":27 + * v_prev = max_neg_val + * else: + * v_prev = value[y-1, x-1] # <<<<<<<<<<<<<< + * value[y, x] += max(v_prev, v_cur) + * + */ + /*else*/ { + __pyx_t_10 = (__pyx_v_y - 1); + __pyx_t_9 = (__pyx_v_x - 1); + __pyx_v_v_prev = (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_10 * __pyx_v_value.strides[0]) )) + __pyx_t_9)) ))); + } + __pyx_L8:; + + /* "monotonic_align/core.pyx":28 + * else: + * v_prev = value[y-1, x-1] + * value[y, x] += max(v_prev, v_cur) # <<<<<<<<<<<<<< + * + * for y in range(t_y - 1, -1, -1): + */ + __pyx_t_11 = __pyx_v_v_cur; + __pyx_t_12 = __pyx_v_v_prev; + if (((__pyx_t_11 > __pyx_t_12) != 0)) { + __pyx_t_13 = __pyx_t_11; + } else { + __pyx_t_13 = __pyx_t_12; + } + __pyx_t_9 = __pyx_v_y; + __pyx_t_10 = __pyx_v_x; + *((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) )) += __pyx_t_13; + } + } + + /* "monotonic_align/core.pyx":30 + * value[y, x] += max(v_prev, v_cur) + * + * for y in range(t_y - 1, -1, -1): # <<<<<<<<<<<<<< + * path[y, index] = 1 + * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): + */ + for (__pyx_t_1 = (__pyx_v_t_y - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { + __pyx_v_y = __pyx_t_1; + + /* "monotonic_align/core.pyx":31 + * + * for y in range(t_y - 1, -1, -1): + * path[y, index] = 1 # <<<<<<<<<<<<<< + * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): + * index = index - 1 + */ + __pyx_t_10 = __pyx_v_y; + __pyx_t_9 = __pyx_v_index; + *((int *) ( /* dim=1 */ ((char *) (((int *) ( /* dim=0 */ (__pyx_v_path.data + __pyx_t_10 * __pyx_v_path.strides[0]) )) + __pyx_t_9)) )) = 1; + + /* "monotonic_align/core.pyx":32 + * for y in range(t_y - 1, -1, -1): + * path[y, index] = 1 + * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): # <<<<<<<<<<<<<< + * index = index - 1 + * + */ + __pyx_t_14 = 
((__pyx_v_index != 0) != 0); + if (__pyx_t_14) { + } else { + __pyx_t_8 = __pyx_t_14; + goto __pyx_L13_bool_binop_done; + } + __pyx_t_14 = ((__pyx_v_index == __pyx_v_y) != 0); + if (!__pyx_t_14) { + } else { + __pyx_t_8 = __pyx_t_14; + goto __pyx_L13_bool_binop_done; + } + __pyx_t_9 = (__pyx_v_y - 1); + __pyx_t_10 = __pyx_v_index; + __pyx_t_15 = (__pyx_v_y - 1); + __pyx_t_16 = (__pyx_v_index - 1); + __pyx_t_14 = (((*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) ))) < (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_15 * __pyx_v_value.strides[0]) )) + __pyx_t_16)) )))) != 0); + __pyx_t_8 = __pyx_t_14; + __pyx_L13_bool_binop_done:; + if (__pyx_t_8) { + + /* "monotonic_align/core.pyx":33 + * path[y, index] = 1 + * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): + * index = index - 1 # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_index = (__pyx_v_index - 1); + + /* "monotonic_align/core.pyx":32 + * for y in range(t_y - 1, -1, -1): + * path[y, index] = 1 + * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): # <<<<<<<<<<<<<< + * index = index - 1 + * + */ + } + } + + /* "monotonic_align/core.pyx":7 + * @cython.boundscheck(False) + * @cython.wraparound(False) + * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< + * cdef int x + * cdef int y + */ + + /* function exit code */ +} + +/* "monotonic_align/core.pyx":38 + * @cython.boundscheck(False) + * @cython.wraparound(False) + * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: # <<<<<<<<<<<<<< + * cdef int b = paths.shape[0] + * cdef int i + */ + +static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static void __pyx_f_15monotonic_align_4core_maximum_path_c(__Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs, CYTHON_UNUSED int __pyx_skip_dispatch) { + CYTHON_UNUSED int __pyx_v_b; + int __pyx_v_i; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + __Pyx_memviewslice __pyx_t_4 = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } }; + Py_ssize_t __pyx_t_6; + Py_ssize_t __pyx_t_7; + + /* "monotonic_align/core.pyx":39 + * @cython.wraparound(False) + * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: + * cdef int b = paths.shape[0] # <<<<<<<<<<<<<< + * cdef int i + * for i in prange(b, nogil=True): + */ + __pyx_v_b = (__pyx_v_paths.shape[0]); + + /* "monotonic_align/core.pyx":41 + * cdef int b = paths.shape[0] + * cdef int i + * for i in prange(b, nogil=True): # <<<<<<<<<<<<<< + * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) + */ + { + #ifdef WITH_THREAD + PyThreadState *_save; + Py_UNBLOCK_THREADS + __Pyx_FastGIL_Remember(); + #endif + /*try:*/ { + __pyx_t_1 = __pyx_v_b; + if ((1 == 0)) abort(); + { + #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) + #undef likely + #undef unlikely + #define likely(x) (x) + #define unlikely(x) (x) + #endif + __pyx_t_3 = (__pyx_t_1 - 0 + 1 - 1/abs(1)) / 1; + if (__pyx_t_3 > 0) + { + #ifdef _OPENMP + #pragma omp parallel private(__pyx_t_6, 
__pyx_t_7) firstprivate(__pyx_t_4, __pyx_t_5) + #endif /* _OPENMP */ + { + #ifdef _OPENMP + #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) + #endif /* _OPENMP */ + for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){ + { + __pyx_v_i = (int)(0 + 1 * __pyx_t_2); + + /* "monotonic_align/core.pyx":42 + * cdef int i + * for i in prange(b, nogil=True): + * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) # <<<<<<<<<<<<<< + */ + __pyx_t_4.data = __pyx_v_paths.data; + __pyx_t_4.memview = __pyx_v_paths.memview; + __PYX_INC_MEMVIEW(&__pyx_t_4, 0); + { + Py_ssize_t __pyx_tmp_idx = __pyx_v_i; + Py_ssize_t __pyx_tmp_stride = __pyx_v_paths.strides[0]; + __pyx_t_4.data += __pyx_tmp_idx * __pyx_tmp_stride; +} + +__pyx_t_4.shape[0] = __pyx_v_paths.shape[1]; +__pyx_t_4.strides[0] = __pyx_v_paths.strides[1]; + __pyx_t_4.suboffsets[0] = -1; + +__pyx_t_4.shape[1] = __pyx_v_paths.shape[2]; +__pyx_t_4.strides[1] = __pyx_v_paths.strides[2]; + __pyx_t_4.suboffsets[1] = -1; + +__pyx_t_5.data = __pyx_v_values.data; + __pyx_t_5.memview = __pyx_v_values.memview; + __PYX_INC_MEMVIEW(&__pyx_t_5, 0); + { + Py_ssize_t __pyx_tmp_idx = __pyx_v_i; + Py_ssize_t __pyx_tmp_stride = __pyx_v_values.strides[0]; + __pyx_t_5.data += __pyx_tmp_idx * __pyx_tmp_stride; +} + +__pyx_t_5.shape[0] = __pyx_v_values.shape[1]; +__pyx_t_5.strides[0] = __pyx_v_values.strides[1]; + __pyx_t_5.suboffsets[0] = -1; + +__pyx_t_5.shape[1] = __pyx_v_values.shape[2]; +__pyx_t_5.strides[1] = __pyx_v_values.strides[2]; + __pyx_t_5.suboffsets[1] = -1; + +__pyx_t_6 = __pyx_v_i; + __pyx_t_7 = __pyx_v_i; + __pyx_f_15monotonic_align_4core_maximum_path_each(__pyx_t_4, __pyx_t_5, (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_t_ys.data) + __pyx_t_6)) ))), (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_t_xs.data) + __pyx_t_7)) ))), NULL); + __PYX_XDEC_MEMVIEW(&__pyx_t_4, 0); + __pyx_t_4.memview = NULL; + __pyx_t_4.data = NULL; + __PYX_XDEC_MEMVIEW(&__pyx_t_5, 0); + __pyx_t_5.memview = NULL; + __pyx_t_5.data = NULL; + } + } + } + } + } + #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) + #undef likely + #undef unlikely + #define likely(x) __builtin_expect(!!(x), 1) + #define unlikely(x) __builtin_expect(!!(x), 0) + #endif + } + + /* "monotonic_align/core.pyx":41 + * cdef int b = paths.shape[0] + * cdef int i + * for i in prange(b, nogil=True): # <<<<<<<<<<<<<< + * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) + */ + /*finally:*/ { + /*normal exit:*/{ + #ifdef WITH_THREAD + __Pyx_FastGIL_Forget(); + Py_BLOCK_THREADS + #endif + goto __pyx_L5; + } + __pyx_L5:; + } + } + + /* "monotonic_align/core.pyx":38 + * @cython.boundscheck(False) + * @cython.wraparound(False) + * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: # <<<<<<<<<<<<<< + * cdef int b = paths.shape[0] + * cdef int i + */ + + /* function exit code */ +} + +/* Python wrapper */ +static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + __Pyx_memviewslice __pyx_v_paths = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_memviewslice __pyx_v_values = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_memviewslice __pyx_v_t_ys = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_memviewslice __pyx_v_t_xs = { 0, 0, { 0 }, { 0 }, { 0 } }; + 
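+/* Hand-written sketch, not Cython output: the generated code above compiles
+ * maximum_path_each() from monotonic_align/core.pyx (the .pyx lines are quoted
+ * in the comments). In pure Python the same dynamic program reads roughly as
+ * below; the names mirror the .pyx source, and `value`/`path` are assumed to
+ * be writable 2-D float/int buffers:
+ *
+ *     def maximum_path_each(path, value, t_y, t_x, max_neg_val=-1e9):
+ *         index = t_x - 1
+ *         # forward pass: value[y, x] accumulates the best monotonic score
+ *         for y in range(t_y):
+ *             for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
+ *                 v_cur = max_neg_val if x == y else value[y - 1, x]
+ *                 if x == 0:
+ *                     v_prev = 0. if y == 0 else max_neg_val
+ *                 else:
+ *                     v_prev = value[y - 1, x - 1]
+ *                 value[y, x] += max(v_prev, v_cur)
+ *         # backward pass: walk the argmax path back from the last column
+ *         for y in range(t_y - 1, -1, -1):
+ *             path[y, index] = 1
+ *             if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]):
+ *                 index = index - 1
+ *
+ * The batched maximum_path_c() quoted at core.pyx:38-42 applies this routine
+ * once per batch element; its prange loop is what the OpenMP pragmas above
+ * implement. */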
int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("maximum_path_c (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_paths,&__pyx_n_s_values,&__pyx_n_s_t_ys,&__pyx_n_s_t_xs,0}; + PyObject* values[4] = {0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_paths)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_values)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 1); __PYX_ERR(0, 38, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_t_ys)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 2); __PYX_ERR(0, 38, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_t_xs)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 3); __PYX_ERR(0, 38, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "maximum_path_c") < 0)) __PYX_ERR(0, 38, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + } + __pyx_v_paths = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_paths.memview)) __PYX_ERR(0, 38, __pyx_L3_error) + __pyx_v_values = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_values.memview)) __PYX_ERR(0, 38, __pyx_L3_error) + __pyx_v_t_ys = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_t_ys.memview)) __PYX_ERR(0, 38, __pyx_L3_error) + __pyx_v_t_xs = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_t_xs.memview)) __PYX_ERR(0, 38, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 38, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("monotonic_align.core.maximum_path_c", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_15monotonic_align_4core_maximum_path_c(__pyx_self, __pyx_v_paths, __pyx_v_values, __pyx_v_t_ys, __pyx_v_t_xs); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject 
*__pyx_pf_15monotonic_align_4core_maximum_path_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("maximum_path_c", 0); + __Pyx_XDECREF(__pyx_r); + if (unlikely(!__pyx_v_paths.memview)) { __Pyx_RaiseUnboundLocalError("paths"); __PYX_ERR(0, 38, __pyx_L1_error) } + if (unlikely(!__pyx_v_values.memview)) { __Pyx_RaiseUnboundLocalError("values"); __PYX_ERR(0, 38, __pyx_L1_error) } + if (unlikely(!__pyx_v_t_ys.memview)) { __Pyx_RaiseUnboundLocalError("t_ys"); __PYX_ERR(0, 38, __pyx_L1_error) } + if (unlikely(!__pyx_v_t_xs.memview)) { __Pyx_RaiseUnboundLocalError("t_xs"); __PYX_ERR(0, 38, __pyx_L1_error) } + __pyx_t_1 = __Pyx_void_to_None(__pyx_f_15monotonic_align_4core_maximum_path_c(__pyx_v_paths, __pyx_v_values, __pyx_v_t_ys, __pyx_v_t_xs, 0)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 38, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("monotonic_align.core.maximum_path_c", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __PYX_XDEC_MEMVIEW(&__pyx_v_paths, 1); + __PYX_XDEC_MEMVIEW(&__pyx_v_values, 1); + __PYX_XDEC_MEMVIEW(&__pyx_v_t_ys, 1); + __PYX_XDEC_MEMVIEW(&__pyx_v_t_xs, 1); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":122 + * cdef bint dtype_is_object + * + * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< + * mode="c", bint allocate_buffer=True): + * + */ + +/* Python wrapper */ +static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_shape = 0; + Py_ssize_t __pyx_v_itemsize; + PyObject *__pyx_v_format = 0; + PyObject *__pyx_v_mode = 0; + int __pyx_v_allocate_buffer; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; + PyObject* values[5] = {0,0,0,0,0}; + values[3] = ((PyObject *)__pyx_n_s_c); + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = 
__Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 122, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 122, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode); + if (value) { values[3] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 4: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer); + if (value) { values[4] = value; kw_args--; } + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 122, __pyx_L3_error) + } + } else { + switch (PyTuple_GET_SIZE(__pyx_args)) { + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + break; + default: goto __pyx_L5_argtuple_error; + } + } + __pyx_v_shape = ((PyObject*)values[0]); + __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error) + __pyx_v_format = values[2]; + __pyx_v_mode = values[3]; + if (values[4]) { + __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 123, __pyx_L3_error) + } else { + + /* "View.MemoryView":123 + * + * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, + * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< + * + * cdef int idx + */ + __pyx_v_allocate_buffer = ((int)1); + } + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 122, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return -1; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 122, __pyx_L1_error) + if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { + PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 122, __pyx_L1_error) + } + __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); + + /* "View.MemoryView":122 + * cdef bint dtype_is_object + * + * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< + * mode="c", bint allocate_buffer=True): + * + */ + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject 
*__pyx_v_mode, int __pyx_v_allocate_buffer) { + int __pyx_v_idx; + Py_ssize_t __pyx_v_i; + Py_ssize_t __pyx_v_dim; + PyObject **__pyx_v_p; + char __pyx_v_order; + int __pyx_r; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + char *__pyx_t_7; + int __pyx_t_8; + Py_ssize_t __pyx_t_9; + PyObject *__pyx_t_10 = NULL; + Py_ssize_t __pyx_t_11; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__cinit__", 0); + __Pyx_INCREF(__pyx_v_format); + + /* "View.MemoryView":129 + * cdef PyObject **p + * + * self.ndim = len(shape) # <<<<<<<<<<<<<< + * self.itemsize = itemsize + * + */ + if (unlikely(__pyx_v_shape == Py_None)) { + PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); + __PYX_ERR(1, 129, __pyx_L1_error) + } + __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 129, __pyx_L1_error) + __pyx_v_self->ndim = ((int)__pyx_t_1); + + /* "View.MemoryView":130 + * + * self.ndim = len(shape) + * self.itemsize = itemsize # <<<<<<<<<<<<<< + * + * if not self.ndim: + */ + __pyx_v_self->itemsize = __pyx_v_itemsize; + + /* "View.MemoryView":132 + * self.itemsize = itemsize + * + * if not self.ndim: # <<<<<<<<<<<<<< + * raise ValueError("Empty shape tuple for cython.array") + * + */ + __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); + if (unlikely(__pyx_t_2)) { + + /* "View.MemoryView":133 + * + * if not self.ndim: + * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< + * + * if itemsize <= 0: + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 133, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 133, __pyx_L1_error) + + /* "View.MemoryView":132 + * self.itemsize = itemsize + * + * if not self.ndim: # <<<<<<<<<<<<<< + * raise ValueError("Empty shape tuple for cython.array") + * + */ + } + + /* "View.MemoryView":135 + * raise ValueError("Empty shape tuple for cython.array") + * + * if itemsize <= 0: # <<<<<<<<<<<<<< + * raise ValueError("itemsize <= 0 for cython.array") + * + */ + __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); + if (unlikely(__pyx_t_2)) { + + /* "View.MemoryView":136 + * + * if itemsize <= 0: + * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< + * + * if not isinstance(format, bytes): + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 136, __pyx_L1_error) + + /* "View.MemoryView":135 + * raise ValueError("Empty shape tuple for cython.array") + * + * if itemsize <= 0: # <<<<<<<<<<<<<< + * raise ValueError("itemsize <= 0 for cython.array") + * + */ + } + + /* "View.MemoryView":138 + * raise ValueError("itemsize <= 0 for cython.array") + * + * if not isinstance(format, bytes): # <<<<<<<<<<<<<< + * format = format.encode('ASCII') + * self._format = format # keep a reference to the byte string + */ + __pyx_t_2 = PyBytes_Check(__pyx_v_format); + __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); + if (__pyx_t_4) { + + /* "View.MemoryView":139 + * + * if not isinstance(format, bytes): + * format = format.encode('ASCII') # <<<<<<<<<<<<<< + 
* self._format = format # keep a reference to the byte string + * self.format = self._format + */ + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 139, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { + __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); + if (likely(__pyx_t_6)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); + __Pyx_INCREF(__pyx_t_6); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_5, function); + } + } + __pyx_t_3 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII); + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 139, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3); + __pyx_t_3 = 0; + + /* "View.MemoryView":138 + * raise ValueError("itemsize <= 0 for cython.array") + * + * if not isinstance(format, bytes): # <<<<<<<<<<<<<< + * format = format.encode('ASCII') + * self._format = format # keep a reference to the byte string + */ + } + + /* "View.MemoryView":140 + * if not isinstance(format, bytes): + * format = format.encode('ASCII') + * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< + * self.format = self._format + * + */ + if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 140, __pyx_L1_error) + __pyx_t_3 = __pyx_v_format; + __Pyx_INCREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_3); + __Pyx_GOTREF(__pyx_v_self->_format); + __Pyx_DECREF(__pyx_v_self->_format); + __pyx_v_self->_format = ((PyObject*)__pyx_t_3); + __pyx_t_3 = 0; + + /* "View.MemoryView":141 + * format = format.encode('ASCII') + * self._format = format # keep a reference to the byte string + * self.format = self._format # <<<<<<<<<<<<<< + * + * + */ + if (unlikely(__pyx_v_self->_format == Py_None)) { + PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found"); + __PYX_ERR(1, 141, __pyx_L1_error) + } + __pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 141, __pyx_L1_error) + __pyx_v_self->format = __pyx_t_7; + + /* "View.MemoryView":144 + * + * + * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< + * self._strides = self._shape + self.ndim + * + */ + __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); + + /* "View.MemoryView":145 + * + * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) + * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< + * + * if not self._shape: + */ + __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); + + /* "View.MemoryView":147 + * self._strides = self._shape + self.ndim + * + * if not self._shape: # <<<<<<<<<<<<<< + * raise MemoryError("unable to allocate shape and strides.") + * + */ + __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); + if (unlikely(__pyx_t_4)) { + + /* "View.MemoryView":148 + * + * if not self._shape: + * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 148, __pyx_L1_error) + 
__Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 148, __pyx_L1_error) + + /* "View.MemoryView":147 + * self._strides = self._shape + self.ndim + * + * if not self._shape: # <<<<<<<<<<<<<< + * raise MemoryError("unable to allocate shape and strides.") + * + */ + } + + /* "View.MemoryView":151 + * + * + * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< + * if dim <= 0: + * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) + */ + __pyx_t_8 = 0; + __pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0; + for (;;) { + if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 151, __pyx_L1_error) + #else + __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 151, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + #endif + __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_v_dim = __pyx_t_9; + __pyx_v_idx = __pyx_t_8; + __pyx_t_8 = (__pyx_t_8 + 1); + + /* "View.MemoryView":152 + * + * for idx, dim in enumerate(shape): + * if dim <= 0: # <<<<<<<<<<<<<< + * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) + * self._shape[idx] = dim + */ + __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); + if (unlikely(__pyx_t_4)) { + + /* "View.MemoryView":153 + * for idx, dim in enumerate(shape): + * if dim <= 0: + * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<< + * self._shape[idx] = dim + * + */ + __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 153, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_GIVEREF(__pyx_t_5); + PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6); + __pyx_t_5 = 0; + __pyx_t_6 = 0; + __pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_Raise(__pyx_t_10, 0, 0, 0); + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __PYX_ERR(1, 153, __pyx_L1_error) + + /* "View.MemoryView":152 + * + * for idx, dim in enumerate(shape): + * if dim <= 0: # <<<<<<<<<<<<<< + * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) + * self._shape[idx] = dim + */ + } + + /* "View.MemoryView":154 + * if dim <= 0: + * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) + * self._shape[idx] = dim # <<<<<<<<<<<<<< + * + * cdef char order + */ + (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; + + /* "View.MemoryView":151 + * + * + * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< + * if dim <= 0: + * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) + */ + } + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "View.MemoryView":157 + * + * cdef char order + * if mode == 'fortran': # <<<<<<<<<<<<<< + * order = b'F' + * self.mode = u'fortran' + */ + __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 157, __pyx_L1_error) + if (__pyx_t_4) { + + /* "View.MemoryView":158 + * cdef char order + * if mode == 'fortran': + * order = b'F' # <<<<<<<<<<<<<< + * self.mode = u'fortran' + * elif mode == 'c': + */ + __pyx_v_order = 'F'; + + /* "View.MemoryView":159 + * if mode == 'fortran': + * order = b'F' + * self.mode = u'fortran' # <<<<<<<<<<<<<< + * elif mode == 'c': + * order = b'C' + */ + __Pyx_INCREF(__pyx_n_u_fortran); + __Pyx_GIVEREF(__pyx_n_u_fortran); + __Pyx_GOTREF(__pyx_v_self->mode); + __Pyx_DECREF(__pyx_v_self->mode); + __pyx_v_self->mode = __pyx_n_u_fortran; + + /* "View.MemoryView":157 + * + * cdef char order + * if mode == 'fortran': # <<<<<<<<<<<<<< + * order = b'F' + * self.mode = u'fortran' + */ + goto __pyx_L10; + } + + /* "View.MemoryView":160 + * order = b'F' + * self.mode = u'fortran' + * elif mode == 'c': # <<<<<<<<<<<<<< + * order = b'C' + * self.mode = u'c' + */ + __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 160, __pyx_L1_error) + if (likely(__pyx_t_4)) { + + /* "View.MemoryView":161 + * self.mode = u'fortran' + * elif mode == 'c': + * order = b'C' # <<<<<<<<<<<<<< + * self.mode = u'c' + * else: + */ + __pyx_v_order = 'C'; + + /* "View.MemoryView":162 + * elif mode == 'c': + * order = b'C' + * self.mode = u'c' # <<<<<<<<<<<<<< + * else: + * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) + */ + __Pyx_INCREF(__pyx_n_u_c); + __Pyx_GIVEREF(__pyx_n_u_c); + __Pyx_GOTREF(__pyx_v_self->mode); + __Pyx_DECREF(__pyx_v_self->mode); + __pyx_v_self->mode = __pyx_n_u_c; + + /* "View.MemoryView":160 + * order = b'F' + * self.mode = u'fortran' + * elif mode == 'c': # <<<<<<<<<<<<<< + * order = b'C' + * self.mode = u'c' + */ + goto __pyx_L10; + } + + /* "View.MemoryView":164 + * self.mode = u'c' + * else: + * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< + * + * self.len = fill_contig_strides_array(self._shape, self._strides, + */ + /*else*/ { + __pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 164, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 164, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_Raise(__pyx_t_10, 0, 0, 0); + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __PYX_ERR(1, 164, __pyx_L1_error) + } + __pyx_L10:; + + /* "View.MemoryView":166 + * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) + * + * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< + * itemsize, self.ndim, order) + * + */ + __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); + + /* "View.MemoryView":169 + * itemsize, self.ndim, order) + * + * self.free_data = allocate_buffer # <<<<<<<<<<<<<< + * self.dtype_is_object = format == b'O' + * if allocate_buffer: + */ + __pyx_v_self->free_data = __pyx_v_allocate_buffer; + + /* "View.MemoryView":170 + * + * 
self.free_data = allocate_buffer + * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< + * if allocate_buffer: + * + */ + __pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 170, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 170, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __pyx_v_self->dtype_is_object = __pyx_t_4; + + /* "View.MemoryView":171 + * self.free_data = allocate_buffer + * self.dtype_is_object = format == b'O' + * if allocate_buffer: # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_4 = (__pyx_v_allocate_buffer != 0); + if (__pyx_t_4) { + + /* "View.MemoryView":174 + * + * + * self.data = malloc(self.len) # <<<<<<<<<<<<<< + * if not self.data: + * raise MemoryError("unable to allocate array data.") + */ + __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); + + /* "View.MemoryView":175 + * + * self.data = malloc(self.len) + * if not self.data: # <<<<<<<<<<<<<< + * raise MemoryError("unable to allocate array data.") + * + */ + __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); + if (unlikely(__pyx_t_4)) { + + /* "View.MemoryView":176 + * self.data = malloc(self.len) + * if not self.data: + * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< + * + * if self.dtype_is_object: + */ + __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 176, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_Raise(__pyx_t_10, 0, 0, 0); + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __PYX_ERR(1, 176, __pyx_L1_error) + + /* "View.MemoryView":175 + * + * self.data = malloc(self.len) + * if not self.data: # <<<<<<<<<<<<<< + * raise MemoryError("unable to allocate array data.") + * + */ + } + + /* "View.MemoryView":178 + * raise MemoryError("unable to allocate array data.") + * + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * p = self.data + * for i in range(self.len / itemsize): + */ + __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); + if (__pyx_t_4) { + + /* "View.MemoryView":179 + * + * if self.dtype_is_object: + * p = self.data # <<<<<<<<<<<<<< + * for i in range(self.len / itemsize): + * p[i] = Py_None + */ + __pyx_v_p = ((PyObject **)__pyx_v_self->data); + + /* "View.MemoryView":180 + * if self.dtype_is_object: + * p = self.data + * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< + * p[i] = Py_None + * Py_INCREF(Py_None) + */ + if (unlikely(__pyx_v_itemsize == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); + __PYX_ERR(1, 180, __pyx_L1_error) + } + else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { + PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); + __PYX_ERR(1, 180, __pyx_L1_error) + } + __pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize); + __pyx_t_9 = __pyx_t_1; + for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) { + __pyx_v_i = __pyx_t_11; + + /* "View.MemoryView":181 + * p = self.data + * for i in range(self.len / itemsize): + * p[i] = Py_None # <<<<<<<<<<<<<< + * Py_INCREF(Py_None) + * + */ + (__pyx_v_p[__pyx_v_i]) = Py_None; + + /* "View.MemoryView":182 + * for i in range(self.len / itemsize): + * p[i] = Py_None + * Py_INCREF(Py_None) # <<<<<<<<<<<<<< + * + * @cname('getbuffer') + */ + 
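+      /* Each PyObject* slot must hold an owned reference: __dealloc__ later
+       * calls refcount_objects_in_slice(...) with a false increment flag to
+       * DECREF every element, so the Py_INCREF below pairs ownership with
+       * the Py_None just stored in p[i]. */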
Py_INCREF(Py_None); + } + + /* "View.MemoryView":178 + * raise MemoryError("unable to allocate array data.") + * + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * p = self.data + * for i in range(self.len / itemsize): + */ + } + + /* "View.MemoryView":171 + * self.free_data = allocate_buffer + * self.dtype_is_object = format == b'O' + * if allocate_buffer: # <<<<<<<<<<<<<< + * + * + */ + } + + /* "View.MemoryView":122 + * cdef bint dtype_is_object + * + * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< + * mode="c", bint allocate_buffer=True): + * + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_10); + __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_format); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":185 + * + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< + * cdef int bufmode = -1 + * if self.mode == u"c": + */ + +/* Python wrapper */ +static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ +static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); + __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_v_bufmode; + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + char *__pyx_t_4; + Py_ssize_t __pyx_t_5; + int __pyx_t_6; + Py_ssize_t *__pyx_t_7; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + if (__pyx_v_info == NULL) { + PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); + return -1; + } + __Pyx_RefNannySetupContext("__getbuffer__", 0); + __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(__pyx_v_info->obj); + + /* "View.MemoryView":186 + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): + * cdef int bufmode = -1 # <<<<<<<<<<<<<< + * if self.mode == u"c": + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + */ + __pyx_v_bufmode = -1; + + /* "View.MemoryView":187 + * def __getbuffer__(self, Py_buffer *info, int flags): + * cdef int bufmode = -1 + * if self.mode == u"c": # <<<<<<<<<<<<<< + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * elif self.mode == u"fortran": + */ + __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 187, __pyx_L1_error) + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":188 + * cdef int bufmode = -1 + * if self.mode == u"c": + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< + * elif self.mode == u"fortran": + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + */ + __pyx_v_bufmode = 
(PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); + + /* "View.MemoryView":187 + * def __getbuffer__(self, Py_buffer *info, int flags): + * cdef int bufmode = -1 + * if self.mode == u"c": # <<<<<<<<<<<<<< + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * elif self.mode == u"fortran": + */ + goto __pyx_L3; + } + + /* "View.MemoryView":189 + * if self.mode == u"c": + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * elif self.mode == u"fortran": # <<<<<<<<<<<<<< + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * if not (flags & bufmode): + */ + __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 189, __pyx_L1_error) + __pyx_t_1 = (__pyx_t_2 != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":190 + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * elif self.mode == u"fortran": + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< + * if not (flags & bufmode): + * raise ValueError("Can only create a buffer that is contiguous in memory.") + */ + __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); + + /* "View.MemoryView":189 + * if self.mode == u"c": + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * elif self.mode == u"fortran": # <<<<<<<<<<<<<< + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * if not (flags & bufmode): + */ + } + __pyx_L3:; + + /* "View.MemoryView":191 + * elif self.mode == u"fortran": + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * if not (flags & bufmode): # <<<<<<<<<<<<<< + * raise ValueError("Can only create a buffer that is contiguous in memory.") + * info.buf = self.data + */ + __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":192 + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * if not (flags & bufmode): + * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< + * info.buf = self.data + * info.len = self.len + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 192, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 192, __pyx_L1_error) + + /* "View.MemoryView":191 + * elif self.mode == u"fortran": + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * if not (flags & bufmode): # <<<<<<<<<<<<<< + * raise ValueError("Can only create a buffer that is contiguous in memory.") + * info.buf = self.data + */ + } + + /* "View.MemoryView":193 + * if not (flags & bufmode): + * raise ValueError("Can only create a buffer that is contiguous in memory.") + * info.buf = self.data # <<<<<<<<<<<<<< + * info.len = self.len + * info.ndim = self.ndim + */ + __pyx_t_4 = __pyx_v_self->data; + __pyx_v_info->buf = __pyx_t_4; + + /* "View.MemoryView":194 + * raise ValueError("Can only create a buffer that is contiguous in memory.") + * info.buf = self.data + * info.len = self.len # <<<<<<<<<<<<<< + * info.ndim = self.ndim + * info.shape = self._shape + */ + __pyx_t_5 = __pyx_v_self->len; + __pyx_v_info->len = __pyx_t_5; + + /* "View.MemoryView":195 + * info.buf = self.data + * info.len = self.len + * info.ndim = self.ndim # <<<<<<<<<<<<<< + * info.shape = self._shape + * info.strides = self._strides + */ + __pyx_t_6 = __pyx_v_self->ndim; + __pyx_v_info->ndim = __pyx_t_6; + + /* "View.MemoryView":196 + * info.len = self.len + * info.ndim = self.ndim + * 
info.shape = self._shape # <<<<<<<<<<<<<< + * info.strides = self._strides + * info.suboffsets = NULL + */ + __pyx_t_7 = __pyx_v_self->_shape; + __pyx_v_info->shape = __pyx_t_7; + + /* "View.MemoryView":197 + * info.ndim = self.ndim + * info.shape = self._shape + * info.strides = self._strides # <<<<<<<<<<<<<< + * info.suboffsets = NULL + * info.itemsize = self.itemsize + */ + __pyx_t_7 = __pyx_v_self->_strides; + __pyx_v_info->strides = __pyx_t_7; + + /* "View.MemoryView":198 + * info.shape = self._shape + * info.strides = self._strides + * info.suboffsets = NULL # <<<<<<<<<<<<<< + * info.itemsize = self.itemsize + * info.readonly = 0 + */ + __pyx_v_info->suboffsets = NULL; + + /* "View.MemoryView":199 + * info.strides = self._strides + * info.suboffsets = NULL + * info.itemsize = self.itemsize # <<<<<<<<<<<<<< + * info.readonly = 0 + * + */ + __pyx_t_5 = __pyx_v_self->itemsize; + __pyx_v_info->itemsize = __pyx_t_5; + + /* "View.MemoryView":200 + * info.suboffsets = NULL + * info.itemsize = self.itemsize + * info.readonly = 0 # <<<<<<<<<<<<<< + * + * if flags & PyBUF_FORMAT: + */ + __pyx_v_info->readonly = 0; + + /* "View.MemoryView":202 + * info.readonly = 0 + * + * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< + * info.format = self.format + * else: + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":203 + * + * if flags & PyBUF_FORMAT: + * info.format = self.format # <<<<<<<<<<<<<< + * else: + * info.format = NULL + */ + __pyx_t_4 = __pyx_v_self->format; + __pyx_v_info->format = __pyx_t_4; + + /* "View.MemoryView":202 + * info.readonly = 0 + * + * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< + * info.format = self.format + * else: + */ + goto __pyx_L5; + } + + /* "View.MemoryView":205 + * info.format = self.format + * else: + * info.format = NULL # <<<<<<<<<<<<<< + * + * info.obj = self + */ + /*else*/ { + __pyx_v_info->format = NULL; + } + __pyx_L5:; + + /* "View.MemoryView":207 + * info.format = NULL + * + * info.obj = self # <<<<<<<<<<<<<< + * + * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") + */ + __Pyx_INCREF(((PyObject *)__pyx_v_self)); + __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); + __pyx_v_info->obj = ((PyObject *)__pyx_v_self); + + /* "View.MemoryView":185 + * + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< + * cdef int bufmode = -1 + * if self.mode == u"c": + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + if (__pyx_v_info->obj != NULL) { + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; + } + goto __pyx_L2; + __pyx_L0:; + if (__pyx_v_info->obj == Py_None) { + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; + } + __pyx_L2:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":211 + * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") + * + * def __dealloc__(array self): # <<<<<<<<<<<<<< + * if self.callback_free_data != NULL: + * self.callback_free_data(self.data) + */ + +/* Python wrapper */ +static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ +static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { + __Pyx_RefNannyDeclarations + 
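+  /* Teardown ownership, as implemented below: callback_free_data wins when
+   * set; otherwise the buffer is freed only if free_data is true (i.e. the
+   * array allocated it), after DECREF'ing elements for object-dtype arrays.
+   * The combined _shape/_strides block is released unconditionally. */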
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); + __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("__dealloc__", 0); + + /* "View.MemoryView":212 + * + * def __dealloc__(array self): + * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< + * self.callback_free_data(self.data) + * elif self.free_data: + */ + __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":213 + * def __dealloc__(array self): + * if self.callback_free_data != NULL: + * self.callback_free_data(self.data) # <<<<<<<<<<<<<< + * elif self.free_data: + * if self.dtype_is_object: + */ + __pyx_v_self->callback_free_data(__pyx_v_self->data); + + /* "View.MemoryView":212 + * + * def __dealloc__(array self): + * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< + * self.callback_free_data(self.data) + * elif self.free_data: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":214 + * if self.callback_free_data != NULL: + * self.callback_free_data(self.data) + * elif self.free_data: # <<<<<<<<<<<<<< + * if self.dtype_is_object: + * refcount_objects_in_slice(self.data, self._shape, + */ + __pyx_t_1 = (__pyx_v_self->free_data != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":215 + * self.callback_free_data(self.data) + * elif self.free_data: + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * refcount_objects_in_slice(self.data, self._shape, + * self._strides, self.ndim, False) + */ + __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":216 + * elif self.free_data: + * if self.dtype_is_object: + * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< + * self._strides, self.ndim, False) + * free(self.data) + */ + __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); + + /* "View.MemoryView":215 + * self.callback_free_data(self.data) + * elif self.free_data: + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * refcount_objects_in_slice(self.data, self._shape, + * self._strides, self.ndim, False) + */ + } + + /* "View.MemoryView":218 + * refcount_objects_in_slice(self.data, self._shape, + * self._strides, self.ndim, False) + * free(self.data) # <<<<<<<<<<<<<< + * PyObject_Free(self._shape) + * + */ + free(__pyx_v_self->data); + + /* "View.MemoryView":214 + * if self.callback_free_data != NULL: + * self.callback_free_data(self.data) + * elif self.free_data: # <<<<<<<<<<<<<< + * if self.dtype_is_object: + * refcount_objects_in_slice(self.data, self._shape, + */ + } + __pyx_L3:; + + /* "View.MemoryView":219 + * self._strides, self.ndim, False) + * free(self.data) + * PyObject_Free(self._shape) # <<<<<<<<<<<<<< + * + * @property + */ + PyObject_Free(__pyx_v_self->_shape); + + /* "View.MemoryView":211 + * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") + * + * def __dealloc__(array self): # <<<<<<<<<<<<<< + * if self.callback_free_data != NULL: + * self.callback_free_data(self.data) + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "View.MemoryView":222 + * + * @property + * def memview(self): # <<<<<<<<<<<<<< + * return self.get_memview() + * + */ + +/* Python wrapper */ +static 
PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":223 + * @property + * def memview(self): + * return self.get_memview() # <<<<<<<<<<<<<< + * + * @cname('get_memview') + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 223, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "View.MemoryView":222 + * + * @property + * def memview(self): # <<<<<<<<<<<<<< + * return self.get_memview() + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":226 + * + * @cname('get_memview') + * cdef get_memview(self): # <<<<<<<<<<<<<< + * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE + * return memoryview(self, flags, self.dtype_is_object) + */ + +static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { + int __pyx_v_flags; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("get_memview", 0); + + /* "View.MemoryView":227 + * @cname('get_memview') + * cdef get_memview(self): + * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< + * return memoryview(self, flags, self.dtype_is_object) + * + */ + __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); + + /* "View.MemoryView":228 + * cdef get_memview(self): + * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE + * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< + * + * def __len__(self): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 228, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 228, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(((PyObject *)__pyx_v_self)); + __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); + PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_2); + PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); + 
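+  /* The flags request a writable, format-carrying, any-contiguous buffer, so
+   * the memoryview built here supports direct typed coercion from Cython.
+   * An illustrative sketch (assumed usage, per the cython.view docs):
+   *
+   *     from cython.view cimport array as cvarray
+   *     cdef double[:, :] mv = cvarray(shape=(4, 4),
+   *                                    itemsize=sizeof(double), format="d")
+   */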
__pyx_t_1 = 0; + __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":226 + * + * @cname('get_memview') + * cdef get_memview(self): # <<<<<<<<<<<<<< + * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE + * return memoryview(self, flags, self.dtype_is_object) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":230 + * return memoryview(self, flags, self.dtype_is_object) + * + * def __len__(self): # <<<<<<<<<<<<<< + * return self._shape[0] + * + */ + +/* Python wrapper */ +static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/ +static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) { + Py_ssize_t __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); + __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) { + Py_ssize_t __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__len__", 0); + + /* "View.MemoryView":231 + * + * def __len__(self): + * return self._shape[0] # <<<<<<<<<<<<<< + * + * def __getattr__(self, attr): + */ + __pyx_r = (__pyx_v_self->_shape[0]); + goto __pyx_L0; + + /* "View.MemoryView":230 + * return memoryview(self, flags, self.dtype_is_object) + * + * def __len__(self): # <<<<<<<<<<<<<< + * return self._shape[0] + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":233 + * return self._shape[0] + * + * def __getattr__(self, attr): # <<<<<<<<<<<<<< + * return getattr(self.memview, attr) + * + */ + +/* Python wrapper */ +static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ +static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); + __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__getattr__", 0); + + /* "View.MemoryView":234 + * + * def __getattr__(self, attr): + * return getattr(self.memview, attr) # <<<<<<<<<<<<<< + * + * def __getitem__(self, item): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), 
__pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 234, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 234, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":233 + * return self._shape[0] + * + * def __getattr__(self, attr): # <<<<<<<<<<<<<< + * return getattr(self.memview, attr) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":236 + * return getattr(self.memview, attr) + * + * def __getitem__(self, item): # <<<<<<<<<<<<<< + * return self.memview[item] + * + */ + +/* Python wrapper */ +static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ +static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); + __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__getitem__", 0); + + /* "View.MemoryView":237 + * + * def __getitem__(self, item): + * return self.memview[item] # <<<<<<<<<<<<<< + * + * def __setitem__(self, item, value): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 237, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 237, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":236 + * return getattr(self.memview, attr) + * + * def __getitem__(self, item): # <<<<<<<<<<<<<< + * return self.memview[item] + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":239 + * return self.memview[item] + * + * def __setitem__(self, item, value): # <<<<<<<<<<<<<< + * self.memview[item] = value + * + */ + +/* Python wrapper */ +static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ +static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); + 
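+  /* __getattr__, __getitem__ and __setitem__ all delegate to self.memview,
+   * so arr[0, 0] = 1.0 executes as arr.memview[0, 0] = 1.0. */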
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setitem__", 0); + + /* "View.MemoryView":240 + * + * def __setitem__(self, item, value): + * self.memview[item] = value # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 240, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 240, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "View.MemoryView":239 + * return self.memview[item] + * + * def __setitem__(self, item, value): # <<<<<<<<<<<<<< + * self.memview[item] = value + * + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__reduce_cython__", 0); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 2, __pyx_L1_error) + + /* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", 
__pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ +static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setstate_cython__", 0); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 4, __pyx_L1_error) + + /* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":244 + * + * @cname("__pyx_array_new") + * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< + * char *mode, char *buf): + * cdef array result + */ + +static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) { + struct __pyx_array_obj *__pyx_v_result = 0; + struct __pyx_array_obj *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("array_cwrapper", 0); + + /* "View.MemoryView":248 + * cdef array result + * + * if buf == NULL: # <<<<<<<<<<<<<< + * result = array(shape, itemsize, format, mode.decode('ASCII')) + * else: + */ + __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":249 + * + * if buf == 
NULL: + * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< + * else: + * result = array(shape, itemsize, format, mode.decode('ASCII'), + */ + __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_INCREF(__pyx_v_shape); + __Pyx_GIVEREF(__pyx_v_shape); + PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); + __Pyx_GIVEREF(__pyx_t_2); + PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); + __pyx_t_2 = 0; + __pyx_t_3 = 0; + __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); + __pyx_t_4 = 0; + + /* "View.MemoryView":248 + * cdef array result + * + * if buf == NULL: # <<<<<<<<<<<<<< + * result = array(shape, itemsize, format, mode.decode('ASCII')) + * else: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":251 + * result = array(shape, itemsize, format, mode.decode('ASCII')) + * else: + * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< + * allocate_buffer=False) + * result.data = buf + */ + /*else*/ { + __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 251, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 251, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_INCREF(__pyx_v_shape); + __Pyx_GIVEREF(__pyx_v_shape); + PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); + __Pyx_GIVEREF(__pyx_t_5); + PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); + __pyx_t_4 = 0; + __pyx_t_5 = 0; + __pyx_t_3 = 0; + + /* "View.MemoryView":252 + * else: + * result = array(shape, itemsize, format, mode.decode('ASCII'), + * allocate_buffer=False) # <<<<<<<<<<<<<< + * result.data = buf + * + */ + __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 252, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 252, __pyx_L1_error) + + /* "View.MemoryView":251 + * result = array(shape, itemsize, format, mode.decode('ASCII')) + * else: + * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< + * allocate_buffer=False) + * result.data = buf + */ + __pyx_t_5 = 
__Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); + __pyx_t_5 = 0; + + /* "View.MemoryView":253 + * result = array(shape, itemsize, format, mode.decode('ASCII'), + * allocate_buffer=False) + * result.data = buf # <<<<<<<<<<<<<< + * + * return result + */ + __pyx_v_result->data = __pyx_v_buf; + } + __pyx_L3:; + + /* "View.MemoryView":255 + * result.data = buf + * + * return result # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(((PyObject *)__pyx_r)); + __Pyx_INCREF(((PyObject *)__pyx_v_result)); + __pyx_r = __pyx_v_result; + goto __pyx_L0; + + /* "View.MemoryView":244 + * + * @cname("__pyx_array_new") + * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< + * char *mode, char *buf): + * cdef array result + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_result); + __Pyx_XGIVEREF((PyObject *)__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":281 + * cdef class Enum(object): + * cdef object name + * def __init__(self, name): # <<<<<<<<<<<<<< + * self.name = name + * def __repr__(self): + */ + +/* Python wrapper */ +static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_name = 0; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; + PyObject* values[1] = {0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 281, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + } + __pyx_v_name = values[0]; + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 281, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return -1; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); + + /* function 
exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__init__", 0); + + /* "View.MemoryView":282 + * cdef object name + * def __init__(self, name): + * self.name = name # <<<<<<<<<<<<<< + * def __repr__(self): + * return self.name + */ + __Pyx_INCREF(__pyx_v_name); + __Pyx_GIVEREF(__pyx_v_name); + __Pyx_GOTREF(__pyx_v_self->name); + __Pyx_DECREF(__pyx_v_self->name); + __pyx_v_self->name = __pyx_v_name; + + /* "View.MemoryView":281 + * cdef class Enum(object): + * cdef object name + * def __init__(self, name): # <<<<<<<<<<<<<< + * self.name = name + * def __repr__(self): + */ + + /* function exit code */ + __pyx_r = 0; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":283 + * def __init__(self, name): + * self.name = name + * def __repr__(self): # <<<<<<<<<<<<<< + * return self.name + * + */ + +/* Python wrapper */ +static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); + __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__repr__", 0); + + /* "View.MemoryView":284 + * self.name = name + * def __repr__(self): + * return self.name # <<<<<<<<<<<<<< + * + * cdef generic = Enum("") + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_self->name); + __pyx_r = __pyx_v_self->name; + goto __pyx_L0; + + /* "View.MemoryView":283 + * def __init__(self, name): + * self.name = name + * def __repr__(self): # <<<<<<<<<<<<<< + * return self.name + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * cdef tuple state + * cdef object _dict + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { + PyObject *__pyx_v_state = 0; + PyObject *__pyx_v__dict = 0; + int __pyx_v_use_setstate; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_t_2; + int __pyx_t_3; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__reduce_cython__", 
0); + + /* "(tree fragment)":5 + * cdef object _dict + * cdef bint use_setstate + * state = (self.name,) # <<<<<<<<<<<<<< + * _dict = getattr(self, '__dict__', None) + * if _dict is not None: + */ + __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(__pyx_v_self->name); + __Pyx_GIVEREF(__pyx_v_self->name); + PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name); + __pyx_v_state = ((PyObject*)__pyx_t_1); + __pyx_t_1 = 0; + + /* "(tree fragment)":6 + * cdef bint use_setstate + * state = (self.name,) + * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< + * if _dict is not None: + * state += (_dict,) + */ + __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v__dict = __pyx_t_1; + __pyx_t_1 = 0; + + /* "(tree fragment)":7 + * state = (self.name,) + * _dict = getattr(self, '__dict__', None) + * if _dict is not None: # <<<<<<<<<<<<<< + * state += (_dict,) + * use_setstate = True + */ + __pyx_t_2 = (__pyx_v__dict != Py_None); + __pyx_t_3 = (__pyx_t_2 != 0); + if (__pyx_t_3) { + + /* "(tree fragment)":8 + * _dict = getattr(self, '__dict__', None) + * if _dict is not None: + * state += (_dict,) # <<<<<<<<<<<<<< + * use_setstate = True + * else: + */ + __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(__pyx_v__dict); + __Pyx_GIVEREF(__pyx_v__dict); + PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); + __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4)); + __pyx_t_4 = 0; + + /* "(tree fragment)":9 + * if _dict is not None: + * state += (_dict,) + * use_setstate = True # <<<<<<<<<<<<<< + * else: + * use_setstate = self.name is not None + */ + __pyx_v_use_setstate = 1; + + /* "(tree fragment)":7 + * state = (self.name,) + * _dict = getattr(self, '__dict__', None) + * if _dict is not None: # <<<<<<<<<<<<<< + * state += (_dict,) + * use_setstate = True + */ + goto __pyx_L3; + } + + /* "(tree fragment)":11 + * use_setstate = True + * else: + * use_setstate = self.name is not None # <<<<<<<<<<<<<< + * if use_setstate: + * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state + */ + /*else*/ { + __pyx_t_3 = (__pyx_v_self->name != Py_None); + __pyx_v_use_setstate = __pyx_t_3; + } + __pyx_L3:; + + /* "(tree fragment)":12 + * else: + * use_setstate = self.name is not None + * if use_setstate: # <<<<<<<<<<<<<< + * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state + * else: + */ + __pyx_t_3 = (__pyx_v_use_setstate != 0); + if (__pyx_t_3) { + + /* "(tree fragment)":13 + * use_setstate = self.name is not None + * if use_setstate: + * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<< + * else: + * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); + __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); + 
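+        /* 0xb068931 (== __pyx_int_184977713 below) is a checksum of Enum's
+         * field layout; __pyx_unpickle_Enum rejects state whose checksum
+         * differs, so pickles cannot cross incompatible builds. */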
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); + __Pyx_INCREF(__pyx_int_184977713); + __Pyx_GIVEREF(__pyx_int_184977713); + PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); + __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(Py_None); + PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); + __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); + __Pyx_INCREF(__pyx_v_state); + __Pyx_GIVEREF(__pyx_v_state); + PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state); + __pyx_t_4 = 0; + __pyx_t_1 = 0; + __pyx_r = __pyx_t_5; + __pyx_t_5 = 0; + goto __pyx_L0; + + /* "(tree fragment)":12 + * else: + * use_setstate = self.name is not None + * if use_setstate: # <<<<<<<<<<<<<< + * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state + * else: + */ + } + + /* "(tree fragment)":15 + * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state + * else: + * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * __pyx_unpickle_Enum__set_state(self, __pyx_state) + */ + /*else*/ { + __Pyx_XDECREF(__pyx_r); + __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); + __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); + PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); + __Pyx_INCREF(__pyx_int_184977713); + __Pyx_GIVEREF(__pyx_int_184977713); + PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); + __Pyx_INCREF(__pyx_v_state); + __Pyx_GIVEREF(__pyx_v_state); + PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); + __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_5); + PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); + __pyx_t_5 = 0; + __pyx_t_1 = 0; + __pyx_r = __pyx_t_4; + __pyx_t_4 = 0; + goto __pyx_L0; + } + + /* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * cdef tuple state + * cdef object _dict + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_state); + __Pyx_XDECREF(__pyx_v__dict); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":16 + * else: + * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * __pyx_unpickle_Enum__set_state(self, __pyx_state) + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ +static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setstate_cython__ 
(wrapper)", 0); + __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setstate_cython__", 0); + + /* "(tree fragment)":17 + * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) + * def __setstate_cython__(self, __pyx_state): + * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<< + */ + if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error) + __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "(tree fragment)":16 + * else: + * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * __pyx_unpickle_Enum__set_state(self, __pyx_state) + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":298 + * + * @cname('__pyx_align_pointer') + * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< + * "Align pointer memory on a given boundary" + * cdef Py_intptr_t aligned_p = memory + */ + +static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { + Py_intptr_t __pyx_v_aligned_p; + size_t __pyx_v_offset; + void *__pyx_r; + int __pyx_t_1; + + /* "View.MemoryView":300 + * cdef void *align_pointer(void *memory, size_t alignment) nogil: + * "Align pointer memory on a given boundary" + * cdef Py_intptr_t aligned_p = memory # <<<<<<<<<<<<<< + * cdef size_t offset + * + */ + __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); + + /* "View.MemoryView":304 + * + * with cython.cdivision(True): + * offset = aligned_p % alignment # <<<<<<<<<<<<<< + * + * if offset > 0: + */ + __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); + + /* "View.MemoryView":306 + * offset = aligned_p % alignment + * + * if offset > 0: # <<<<<<<<<<<<<< + * aligned_p += alignment - offset + * + */ + __pyx_t_1 = ((__pyx_v_offset > 0) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":307 + * + * if offset > 0: + * aligned_p += alignment - offset # <<<<<<<<<<<<<< + * + * return aligned_p + */ + __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); + + /* "View.MemoryView":306 + * offset = aligned_p % alignment + * + * if offset > 0: # <<<<<<<<<<<<<< + * aligned_p += alignment - offset + * + */ + } + + /* "View.MemoryView":309 + * aligned_p += alignment - offset + * + * return aligned_p # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = ((void *)__pyx_v_aligned_p); + goto __pyx_L0; + 
+ /* "View.MemoryView":298 + * + * @cname('__pyx_align_pointer') + * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< + * "Align pointer memory on a given boundary" + * cdef Py_intptr_t aligned_p = memory + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":345 + * cdef __Pyx_TypeInfo *typeinfo + * + * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< + * self.obj = obj + * self.flags = flags + */ + +/* Python wrapper */ +static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_obj = 0; + int __pyx_v_flags; + int __pyx_v_dtype_is_object; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; + PyObject* values[3] = {0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 345, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object); + if (value) { values[2] = value; kw_args--; } + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 345, __pyx_L3_error) + } + } else { + switch (PyTuple_GET_SIZE(__pyx_args)) { + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + break; + default: goto __pyx_L5_argtuple_error; + } + } + __pyx_v_obj = values[0]; + __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) + if (values[2]) { + __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) + } else { + __pyx_v_dtype_is_object = ((int)0); + } + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 345, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return -1; + __pyx_L4_argument_unpacking_done:; + __pyx_r = 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__cinit__", 0); + + /* "View.MemoryView":346 + * + * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): + * self.obj = obj # <<<<<<<<<<<<<< + * self.flags = flags + * if type(self) is memoryview or obj is not None: + */ + __Pyx_INCREF(__pyx_v_obj); + __Pyx_GIVEREF(__pyx_v_obj); + __Pyx_GOTREF(__pyx_v_self->obj); + __Pyx_DECREF(__pyx_v_self->obj); + __pyx_v_self->obj = __pyx_v_obj; + + /* "View.MemoryView":347 + * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): + * self.obj = obj + * self.flags = flags # <<<<<<<<<<<<<< + * if type(self) is memoryview or obj is not None: + * __Pyx_GetBuffer(obj, &self.view, flags) + */ + __pyx_v_self->flags = __pyx_v_flags; + + /* "View.MemoryView":348 + * self.obj = obj + * self.flags = flags + * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< + * __Pyx_GetBuffer(obj, &self.view, flags) + * if self.view.obj == NULL: + */ + __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); + __pyx_t_3 = (__pyx_t_2 != 0); + if (!__pyx_t_3) { + } else { + __pyx_t_1 = __pyx_t_3; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_3 = (__pyx_v_obj != Py_None); + __pyx_t_2 = (__pyx_t_3 != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L4_bool_binop_done:; + if (__pyx_t_1) { + + /* "View.MemoryView":349 + * self.flags = flags + * if type(self) is memoryview or obj is not None: + * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< + * if self.view.obj == NULL: + * (<__pyx_buffer *> &self.view).obj = Py_None + */ + __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 349, __pyx_L1_error) + + /* "View.MemoryView":350 + * if type(self) is memoryview or obj is not None: + * __Pyx_GetBuffer(obj, &self.view, flags) + * if self.view.obj == NULL: # <<<<<<<<<<<<<< + * (<__pyx_buffer *> &self.view).obj = Py_None + * Py_INCREF(Py_None) + */ + __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":351 + * __Pyx_GetBuffer(obj, &self.view, flags) + * if self.view.obj == NULL: + * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< + * Py_INCREF(Py_None) + * + */ + ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; + + /* "View.MemoryView":352 + * if self.view.obj == NULL: + * (<__pyx_buffer *> &self.view).obj = Py_None + * Py_INCREF(Py_None) # <<<<<<<<<<<<<< + * + * global __pyx_memoryview_thread_locks_used + */ + Py_INCREF(Py_None); + + /* "View.MemoryView":350 + * if type(self) is memoryview or obj is not None: + * __Pyx_GetBuffer(obj, &self.view, flags) + * if self.view.obj == NULL: # <<<<<<<<<<<<<< + * (<__pyx_buffer *> &self.view).obj = Py_None + * Py_INCREF(Py_None) + */ + } + + /* "View.MemoryView":348 + * self.obj = obj + * self.flags = flags + * 
if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< + * __Pyx_GetBuffer(obj, &self.view, flags) + * if self.view.obj == NULL: + */ + } + + /* "View.MemoryView":355 + * + * global __pyx_memoryview_thread_locks_used + * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] + * __pyx_memoryview_thread_locks_used += 1 + */ + __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":356 + * global __pyx_memoryview_thread_locks_used + * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks_used += 1 + * if self.lock is NULL: + */ + __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); + + /* "View.MemoryView":357 + * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] + * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< + * if self.lock is NULL: + * self.lock = PyThread_allocate_lock() + */ + __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); + + /* "View.MemoryView":355 + * + * global __pyx_memoryview_thread_locks_used + * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] + * __pyx_memoryview_thread_locks_used += 1 + */ + } + + /* "View.MemoryView":358 + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] + * __pyx_memoryview_thread_locks_used += 1 + * if self.lock is NULL: # <<<<<<<<<<<<<< + * self.lock = PyThread_allocate_lock() + * if self.lock is NULL: + */ + __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":359 + * __pyx_memoryview_thread_locks_used += 1 + * if self.lock is NULL: + * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< + * if self.lock is NULL: + * raise MemoryError + */ + __pyx_v_self->lock = PyThread_allocate_lock(); + + /* "View.MemoryView":360 + * if self.lock is NULL: + * self.lock = PyThread_allocate_lock() + * if self.lock is NULL: # <<<<<<<<<<<<<< + * raise MemoryError + * + */ + __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":361 + * self.lock = PyThread_allocate_lock() + * if self.lock is NULL: + * raise MemoryError # <<<<<<<<<<<<<< + * + * if flags & PyBUF_FORMAT: + */ + PyErr_NoMemory(); __PYX_ERR(1, 361, __pyx_L1_error) + + /* "View.MemoryView":360 + * if self.lock is NULL: + * self.lock = PyThread_allocate_lock() + * if self.lock is NULL: # <<<<<<<<<<<<<< + * raise MemoryError + * + */ + } + + /* "View.MemoryView":358 + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] + * __pyx_memoryview_thread_locks_used += 1 + * if self.lock is NULL: # <<<<<<<<<<<<<< + * self.lock = PyThread_allocate_lock() + * if self.lock is NULL: + */ + } + + /* "View.MemoryView":363 + * raise MemoryError + * + * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< + * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') + * else: + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":364 + * + * if flags & PyBUF_FORMAT: + * self.dtype_is_object = (self.view.format[0] == 
b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< + * else: + * self.dtype_is_object = dtype_is_object + */ + __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L11_bool_binop_done; + } + __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L11_bool_binop_done:; + __pyx_v_self->dtype_is_object = __pyx_t_1; + + /* "View.MemoryView":363 + * raise MemoryError + * + * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< + * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') + * else: + */ + goto __pyx_L10; + } + + /* "View.MemoryView":366 + * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') + * else: + * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< + * + * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( + */ + /*else*/ { + __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; + } + __pyx_L10:; + + /* "View.MemoryView":368 + * self.dtype_is_object = dtype_is_object + * + * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< + * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) + * self.typeinfo = NULL + */ + __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); + + /* "View.MemoryView":370 + * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( + * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) + * self.typeinfo = NULL # <<<<<<<<<<<<<< + * + * def __dealloc__(memoryview self): + */ + __pyx_v_self->typeinfo = NULL; + + /* "View.MemoryView":345 + * cdef __Pyx_TypeInfo *typeinfo + * + * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< + * self.obj = obj + * self.flags = flags + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":372 + * self.typeinfo = NULL + * + * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< + * if self.obj is not None: + * __Pyx_ReleaseBuffer(&self.view) + */ + +/* Python wrapper */ +static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ +static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); + __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { + int __pyx_v_i; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + int __pyx_t_5; + PyThread_type_lock __pyx_t_6; + PyThread_type_lock __pyx_t_7; + __Pyx_RefNannySetupContext("__dealloc__", 0); + + /* "View.MemoryView":373 + * + * def __dealloc__(memoryview self): + * if self.obj is not None: # <<<<<<<<<<<<<< + * __Pyx_ReleaseBuffer(&self.view) + * elif (<__pyx_buffer *> &self.view).obj == Py_None: + */ + __pyx_t_1 = (__pyx_v_self->obj != Py_None); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* 
"View.MemoryView":374 + * def __dealloc__(memoryview self): + * if self.obj is not None: + * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< + * elif (<__pyx_buffer *> &self.view).obj == Py_None: + * + */ + __Pyx_ReleaseBuffer((&__pyx_v_self->view)); + + /* "View.MemoryView":373 + * + * def __dealloc__(memoryview self): + * if self.obj is not None: # <<<<<<<<<<<<<< + * __Pyx_ReleaseBuffer(&self.view) + * elif (<__pyx_buffer *> &self.view).obj == Py_None: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":375 + * if self.obj is not None: + * __Pyx_ReleaseBuffer(&self.view) + * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< + * + * (<__pyx_buffer *> &self.view).obj = NULL + */ + __pyx_t_2 = ((((Py_buffer *)(&__pyx_v_self->view))->obj == Py_None) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":377 + * elif (<__pyx_buffer *> &self.view).obj == Py_None: + * + * (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<< + * Py_DECREF(Py_None) + * + */ + ((Py_buffer *)(&__pyx_v_self->view))->obj = NULL; + + /* "View.MemoryView":378 + * + * (<__pyx_buffer *> &self.view).obj = NULL + * Py_DECREF(Py_None) # <<<<<<<<<<<<<< + * + * cdef int i + */ + Py_DECREF(Py_None); + + /* "View.MemoryView":375 + * if self.obj is not None: + * __Pyx_ReleaseBuffer(&self.view) + * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< + * + * (<__pyx_buffer *> &self.view).obj = NULL + */ + } + __pyx_L3:; + + /* "View.MemoryView":382 + * cdef int i + * global __pyx_memoryview_thread_locks_used + * if self.lock != NULL: # <<<<<<<<<<<<<< + * for i in range(__pyx_memoryview_thread_locks_used): + * if __pyx_memoryview_thread_locks[i] is self.lock: + */ + __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":383 + * global __pyx_memoryview_thread_locks_used + * if self.lock != NULL: + * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< + * if __pyx_memoryview_thread_locks[i] is self.lock: + * __pyx_memoryview_thread_locks_used -= 1 + */ + __pyx_t_3 = __pyx_memoryview_thread_locks_used; + __pyx_t_4 = __pyx_t_3; + for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { + __pyx_v_i = __pyx_t_5; + + /* "View.MemoryView":384 + * if self.lock != NULL: + * for i in range(__pyx_memoryview_thread_locks_used): + * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks_used -= 1 + * if i != __pyx_memoryview_thread_locks_used: + */ + __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":385 + * for i in range(__pyx_memoryview_thread_locks_used): + * if __pyx_memoryview_thread_locks[i] is self.lock: + * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< + * if i != __pyx_memoryview_thread_locks_used: + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( + */ + __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1); + + /* "View.MemoryView":386 + * if __pyx_memoryview_thread_locks[i] is self.lock: + * __pyx_memoryview_thread_locks_used -= 1 + * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( + * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) + */ + __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":388 + 
* if i != __pyx_memoryview_thread_locks_used: + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( + * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< + * break + * else: + */ + __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); + __pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]); + + /* "View.MemoryView":387 + * __pyx_memoryview_thread_locks_used -= 1 + * if i != __pyx_memoryview_thread_locks_used: + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) + * break + */ + (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6; + (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7; + + /* "View.MemoryView":386 + * if __pyx_memoryview_thread_locks[i] is self.lock: + * __pyx_memoryview_thread_locks_used -= 1 + * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( + * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) + */ + } + + /* "View.MemoryView":389 + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( + * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) + * break # <<<<<<<<<<<<<< + * else: + * PyThread_free_lock(self.lock) + */ + goto __pyx_L6_break; + + /* "View.MemoryView":384 + * if self.lock != NULL: + * for i in range(__pyx_memoryview_thread_locks_used): + * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks_used -= 1 + * if i != __pyx_memoryview_thread_locks_used: + */ + } + } + /*else*/ { + + /* "View.MemoryView":391 + * break + * else: + * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< + * + * cdef char *get_item_pointer(memoryview self, object index) except NULL: + */ + PyThread_free_lock(__pyx_v_self->lock); + } + __pyx_L6_break:; + + /* "View.MemoryView":382 + * cdef int i + * global __pyx_memoryview_thread_locks_used + * if self.lock != NULL: # <<<<<<<<<<<<<< + * for i in range(__pyx_memoryview_thread_locks_used): + * if __pyx_memoryview_thread_locks[i] is self.lock: + */ + } + + /* "View.MemoryView":372 + * self.typeinfo = NULL + * + * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< + * if self.obj is not None: + * __Pyx_ReleaseBuffer(&self.view) + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "View.MemoryView":393 + * PyThread_free_lock(self.lock) + * + * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< + * cdef Py_ssize_t dim + * cdef char *itemp = <char *> self.view.buf + */ + +static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { + Py_ssize_t __pyx_v_dim; + char *__pyx_v_itemp; + PyObject *__pyx_v_idx = NULL; + char *__pyx_r; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + Py_ssize_t __pyx_t_3; + PyObject *(*__pyx_t_4)(PyObject *); + PyObject *__pyx_t_5 = NULL; + Py_ssize_t __pyx_t_6; + char *__pyx_t_7; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("get_item_pointer", 0); 
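+ + /* The loop below is the expansion of "for dim, idx in enumerate(index)": + * exact list and tuple indices take the fast GET_ITEM path, anything else + * falls back to the generic iterator protocol. Each index is coerced to + * Py_ssize_t and folded into itemp one dimension at a time by + * __pyx_pybuffer_index, which applies that dimension's stride (and + * suboffset, where present). */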
+ + /* "View.MemoryView":395 + * cdef char *get_item_pointer(memoryview self, object index) except NULL: + * cdef Py_ssize_t dim + * cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<< + * + * for dim, idx in enumerate(index): + */ + __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); + + /* "View.MemoryView":397 + * cdef char *itemp = <char *> self.view.buf + * + * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< + * itemp = pybuffer_index(&self.view, itemp, idx, dim) + * + */ + __pyx_t_1 = 0; + if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { + __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; + __pyx_t_4 = NULL; + } else { + __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 397, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 397, __pyx_L1_error) + } + for (;;) { + if (likely(!__pyx_t_4)) { + if (likely(PyList_CheckExact(__pyx_t_2))) { + if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) + #else + __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + #endif + } else { + if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) + #else + __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + #endif + } + } else { + __pyx_t_5 = __pyx_t_4(__pyx_t_2); + if (unlikely(!__pyx_t_5)) { + PyObject* exc_type = PyErr_Occurred(); + if (exc_type) { + if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); + else __PYX_ERR(1, 397, __pyx_L1_error) + } + break; + } + __Pyx_GOTREF(__pyx_t_5); + } + __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); + __pyx_t_5 = 0; + __pyx_v_dim = __pyx_t_1; + __pyx_t_1 = (__pyx_t_1 + 1); + + /* "View.MemoryView":398 + * + * for dim, idx in enumerate(index): + * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< + * + * return itemp + */ + __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 398, __pyx_L1_error) + __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 398, __pyx_L1_error) + __pyx_v_itemp = __pyx_t_7; + + /* "View.MemoryView":397 + * cdef char *itemp = <char *> self.view.buf + * + * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< + * itemp = pybuffer_index(&self.view, itemp, idx, dim) + * + */ + } + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "View.MemoryView":400 + * itemp = pybuffer_index(&self.view, itemp, idx, dim) + * + * return itemp # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = __pyx_v_itemp; + goto __pyx_L0; + + /* "View.MemoryView":393 + * PyThread_free_lock(self.lock) + * + * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< + * cdef Py_ssize_t dim + * cdef char *itemp = <char *> self.view.buf + */ + + /* function exit code */ + 
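/* Generated error epilogue: failures jump to __pyx_L1_error, temporaries + * are released, a traceback frame is recorded, and NULL is returned to + * honour the "except NULL" declaration; __pyx_L0 is the cleanup path shared + * by the success and error exits. */ + 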
__pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_idx); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":403 + * + * + * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< + * if index is Ellipsis: + * return self + */ + +/* Python wrapper */ +static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ +static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { + PyObject *__pyx_v_have_slices = NULL; + PyObject *__pyx_v_indices = NULL; + char *__pyx_v_itemp; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + char *__pyx_t_6; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__getitem__", 0); + + /* "View.MemoryView":404 + * + * def __getitem__(memoryview self, object index): + * if index is Ellipsis: # <<<<<<<<<<<<<< + * return self + * + */ + __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":405 + * def __getitem__(memoryview self, object index): + * if index is Ellipsis: + * return self # <<<<<<<<<<<<<< + * + * have_slices, indices = _unellipsify(index, self.view.ndim) + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_self)); + __pyx_r = ((PyObject *)__pyx_v_self); + goto __pyx_L0; + + /* "View.MemoryView":404 + * + * def __getitem__(memoryview self, object index): + * if index is Ellipsis: # <<<<<<<<<<<<<< + * return self + * + */ + } + + /* "View.MemoryView":407 + * return self + * + * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< + * + * cdef char *itemp + */ + __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 407, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + if (likely(__pyx_t_3 != Py_None)) { + PyObject* sequence = __pyx_t_3; + Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); + if (unlikely(size != 2)) { + if (size > 2) __Pyx_RaiseTooManyValuesError(2); + else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); + __PYX_ERR(1, 407, __pyx_L1_error) + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); + __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); + __Pyx_INCREF(__pyx_t_4); + __Pyx_INCREF(__pyx_t_5); + #else + __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 407, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 407, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + #endif + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } else { + 
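/* _unellipsify is expected to return a (have_slices, indices) pair, so a + * None result is reported as not iterable here. */ + 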
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 407, __pyx_L1_error) + } + __pyx_v_have_slices = __pyx_t_4; + __pyx_t_4 = 0; + __pyx_v_indices = __pyx_t_5; + __pyx_t_5 = 0; + + /* "View.MemoryView":410 + * + * cdef char *itemp + * if have_slices: # <<<<<<<<<<<<<< + * return memview_slice(self, indices) + * else: + */ + __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 410, __pyx_L1_error) + if (__pyx_t_2) { + + /* "View.MemoryView":411 + * cdef char *itemp + * if have_slices: + * return memview_slice(self, indices) # <<<<<<<<<<<<<< + * else: + * itemp = self.get_item_pointer(indices) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "View.MemoryView":410 + * + * cdef char *itemp + * if have_slices: # <<<<<<<<<<<<<< + * return memview_slice(self, indices) + * else: + */ + } + + /* "View.MemoryView":413 + * return memview_slice(self, indices) + * else: + * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< + * return self.convert_item_to_object(itemp) + * + */ + /*else*/ { + __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 413, __pyx_L1_error) + __pyx_v_itemp = __pyx_t_6; + + /* "View.MemoryView":414 + * else: + * itemp = self.get_item_pointer(indices) + * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< + * + * def __setitem__(memoryview self, object index, object value): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 414, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + } + + /* "View.MemoryView":403 + * + * + * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< + * if index is Ellipsis: + * return self + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_have_slices); + __Pyx_XDECREF(__pyx_v_indices); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":416 + * return self.convert_item_to_object(itemp) + * + * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< + * if self.view.readonly: + * raise TypeError("Cannot assign to read-only memoryview") + */ + +/* Python wrapper */ +static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ +static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { + PyObject *__pyx_v_have_slices = NULL; + PyObject *__pyx_v_obj = NULL; + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setitem__", 0); + __Pyx_INCREF(__pyx_v_index); + + /* "View.MemoryView":417 + * + * def __setitem__(memoryview self, object index, object value): + * if self.view.readonly: # <<<<<<<<<<<<<< + * raise TypeError("Cannot assign to read-only memoryview") + * + */ + __pyx_t_1 = (__pyx_v_self->view.readonly != 0); + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":418 + * def __setitem__(memoryview self, object index, object value): + * if self.view.readonly: + * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< + * + * have_slices, index = _unellipsify(index, self.view.ndim) + */ + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 418, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(1, 418, __pyx_L1_error) + + /* "View.MemoryView":417 + * + * def __setitem__(memoryview self, object index, object value): + * if self.view.readonly: # <<<<<<<<<<<<<< + * raise TypeError("Cannot assign to read-only memoryview") + * + */ + } + + /* "View.MemoryView":420 + * raise TypeError("Cannot assign to read-only memoryview") + * + * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< + * + * if have_slices: + */ + __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 420, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (likely(__pyx_t_2 != Py_None)) { + PyObject* sequence = __pyx_t_2; + Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); + if (unlikely(size != 2)) { + if (size > 2) __Pyx_RaiseTooManyValuesError(2); + else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); + __PYX_ERR(1, 420, __pyx_L1_error) + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); + __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(__pyx_t_4); + #else + __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 420, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 420, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + #endif + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } else { + __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 420, __pyx_L1_error) + } + __pyx_v_have_slices = __pyx_t_3; + __pyx_t_3 = 0; + __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4); + __pyx_t_4 = 0; + + /* "View.MemoryView":422 + * have_slices, index = _unellipsify(index, self.view.ndim) + * + * if have_slices: # <<<<<<<<<<<<<< + * obj = self.is_slice(value) + * if obj: + */ + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 422, __pyx_L1_error) + if (__pyx_t_1) { + + /* "View.MemoryView":423 + * + * if have_slices: + * obj = self.is_slice(value) # <<<<<<<<<<<<<< + * if obj: + * self.setitem_slice_assignment(self[index], obj) + */ + __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview 
*)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 423, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_v_obj = __pyx_t_2; + __pyx_t_2 = 0; + + /* "View.MemoryView":424 + * if have_slices: + * obj = self.is_slice(value) + * if obj: # <<<<<<<<<<<<<< + * self.setitem_slice_assignment(self[index], obj) + * else: + */ + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 424, __pyx_L1_error) + if (__pyx_t_1) { + + /* "View.MemoryView":425 + * obj = self.is_slice(value) + * if obj: + * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< + * else: + * self.setitem_slice_assign_scalar(self[index], value) + */ + __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 425, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "View.MemoryView":424 + * if have_slices: + * obj = self.is_slice(value) + * if obj: # <<<<<<<<<<<<<< + * self.setitem_slice_assignment(self[index], obj) + * else: + */ + goto __pyx_L5; + } + + /* "View.MemoryView":427 + * self.setitem_slice_assignment(self[index], obj) + * else: + * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< + * else: + * self.setitem_indexed(index, value) + */ + /*else*/ { + __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 427, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 427, __pyx_L1_error) + __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 427, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } + __pyx_L5:; + + /* "View.MemoryView":422 + * have_slices, index = _unellipsify(index, self.view.ndim) + * + * if have_slices: # <<<<<<<<<<<<<< + * obj = self.is_slice(value) + * if obj: + */ + goto __pyx_L4; + } + + /* "View.MemoryView":429 + * self.setitem_slice_assign_scalar(self[index], value) + * else: + * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< + * + * cdef is_slice(self, obj): + */ + /*else*/ { + __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 429, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } + __pyx_L4:; + + /* "View.MemoryView":416 + * return self.convert_item_to_object(itemp) + * + * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< + * if self.view.readonly: + * raise TypeError("Cannot assign to read-only memoryview") + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; 
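+ /* Shared exit: the unpacked locals and the rebound index reference are + * released on both the success (0) and error (-1) paths before the result + * code is returned to the calling slot. */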
+ __Pyx_XDECREF(__pyx_v_have_slices); + __Pyx_XDECREF(__pyx_v_obj); + __Pyx_XDECREF(__pyx_v_index); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":431 + * self.setitem_indexed(index, value) + * + * cdef is_slice(self, obj): # <<<<<<<<<<<<<< + * if not isinstance(obj, memoryview): + * try: + */ + +static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + int __pyx_t_9; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("is_slice", 0); + __Pyx_INCREF(__pyx_v_obj); + + /* "View.MemoryView":432 + * + * cdef is_slice(self, obj): + * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< + * try: + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + */ + __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); + __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":433 + * cdef is_slice(self, obj): + * if not isinstance(obj, memoryview): + * try: # <<<<<<<<<<<<<< + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + * self.dtype_is_object) + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); + __Pyx_XGOTREF(__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_4); + __Pyx_XGOTREF(__pyx_t_5); + /*try:*/ { + + /* "View.MemoryView":434 + * if not isinstance(obj, memoryview): + * try: + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< + * self.dtype_is_object) + * except TypeError: + */ + __pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 434, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_6); + + /* "View.MemoryView":435 + * try: + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + * self.dtype_is_object) # <<<<<<<<<<<<<< + * except TypeError: + * return None + */ + __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 435, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_7); + + /* "View.MemoryView":434 + * if not isinstance(obj, memoryview): + * try: + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< + * self.dtype_is_object) + * except TypeError: + */ + __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 434, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_INCREF(__pyx_v_obj); + __Pyx_GIVEREF(__pyx_v_obj); + PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); + __Pyx_GIVEREF(__pyx_t_7); + PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); + __pyx_t_6 = 0; + __pyx_t_7 = 0; + __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 434, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); + __pyx_t_7 = 0; + + /* "View.MemoryView":433 + * cdef is_slice(self, obj): + * if not isinstance(obj, memoryview): + * try: # <<<<<<<<<<<<<< + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | 
PyBUF_ANY_CONTIGUOUS, + * self.dtype_is_object) + */ + } + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + goto __pyx_L9_try_end; + __pyx_L4_error:; + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; + + /* "View.MemoryView":436 + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + * self.dtype_is_object) + * except TypeError: # <<<<<<<<<<<<<< + * return None + * + */ + __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); + if (__pyx_t_9) { + __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 436, __pyx_L6_except_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_GOTREF(__pyx_t_8); + __Pyx_GOTREF(__pyx_t_6); + + /* "View.MemoryView":437 + * self.dtype_is_object) + * except TypeError: + * return None # <<<<<<<<<<<<<< + * + * return obj + */ + __Pyx_XDECREF(__pyx_r); + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + goto __pyx_L7_except_return; + } + goto __pyx_L6_except_error; + __pyx_L6_except_error:; + + /* "View.MemoryView":433 + * cdef is_slice(self, obj): + * if not isinstance(obj, memoryview): + * try: # <<<<<<<<<<<<<< + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + * self.dtype_is_object) + */ + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_XGIVEREF(__pyx_t_5); + __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); + goto __pyx_L1_error; + __pyx_L7_except_return:; + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_XGIVEREF(__pyx_t_5); + __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); + goto __pyx_L0; + __pyx_L9_try_end:; + } + + /* "View.MemoryView":432 + * + * cdef is_slice(self, obj): + * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< + * try: + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + */ + } + + /* "View.MemoryView":439 + * return None + * + * return obj # <<<<<<<<<<<<<< + * + * cdef setitem_slice_assignment(self, dst, src): + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_obj); + __pyx_r = __pyx_v_obj; + goto __pyx_L0; + + /* "View.MemoryView":431 + * self.setitem_indexed(index, value) + * + * cdef is_slice(self, obj): # <<<<<<<<<<<<<< + * if not isinstance(obj, memoryview): + * try: + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_obj); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":441 + * return obj + * + * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice dst_slice + * cdef __Pyx_memviewslice src_slice + */ + +static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { + __Pyx_memviewslice __pyx_v_dst_slice; + __Pyx_memviewslice __pyx_v_src_slice; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_memviewslice *__pyx_t_1; + __Pyx_memviewslice *__pyx_t_2; + PyObject *__pyx_t_3 
= NULL; + int __pyx_t_4; + int __pyx_t_5; + int __pyx_t_6; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); + + /* "View.MemoryView":445 + * cdef __Pyx_memviewslice src_slice + * + * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< + * get_slice_from_memview(dst, &dst_slice)[0], + * src.ndim, dst.ndim, self.dtype_is_object) + */ + if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 445, __pyx_L1_error) + __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 445, __pyx_L1_error) + + /* "View.MemoryView":446 + * + * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], + * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< + * src.ndim, dst.ndim, self.dtype_is_object) + * + */ + if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 446, __pyx_L1_error) + __pyx_t_2 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 446, __pyx_L1_error) + + /* "View.MemoryView":447 + * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], + * get_slice_from_memview(dst, &dst_slice)[0], + * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< + * + * cdef setitem_slice_assign_scalar(self, memoryview dst, value): + */ + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "View.MemoryView":445 + * cdef __Pyx_memviewslice src_slice + * + * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< + * get_slice_from_memview(dst, &dst_slice)[0], + * src.ndim, dst.ndim, self.dtype_is_object) + */ + __pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 445, __pyx_L1_error) + + /* "View.MemoryView":441 + * return obj + * + * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice dst_slice + * cdef __Pyx_memviewslice src_slice + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":449 + * src.ndim, dst.ndim, self.dtype_is_object) + * + * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # 
<<<<<<<<<<<<<< + * cdef int array[128] + * cdef void *tmp = NULL + */ + +static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { + int __pyx_v_array[0x80]; + void *__pyx_v_tmp; + void *__pyx_v_item; + __Pyx_memviewslice *__pyx_v_dst_slice; + __Pyx_memviewslice __pyx_v_tmp_slice; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_memviewslice *__pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + int __pyx_t_5; + char const *__pyx_t_6; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + PyObject *__pyx_t_9 = NULL; + PyObject *__pyx_t_10 = NULL; + PyObject *__pyx_t_11 = NULL; + PyObject *__pyx_t_12 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); + + /* "View.MemoryView":451 + * cdef setitem_slice_assign_scalar(self, memoryview dst, value): + * cdef int array[128] + * cdef void *tmp = NULL # <<<<<<<<<<<<<< + * cdef void *item + * + */ + __pyx_v_tmp = NULL; + + /* "View.MemoryView":456 + * cdef __Pyx_memviewslice *dst_slice + * cdef __Pyx_memviewslice tmp_slice + * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< + * + * if self.view.itemsize > sizeof(array): + */ + __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 456, __pyx_L1_error) + __pyx_v_dst_slice = __pyx_t_1; + + /* "View.MemoryView":458 + * dst_slice = get_slice_from_memview(dst, &tmp_slice) + * + * if self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< + * tmp = PyMem_Malloc(self.view.itemsize) + * if tmp == NULL: + */ + __pyx_t_2 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":459 + * + * if self.view.itemsize > sizeof(array): + * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< + * if tmp == NULL: + * raise MemoryError + */ + __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); + + /* "View.MemoryView":460 + * if self.view.itemsize > sizeof(array): + * tmp = PyMem_Malloc(self.view.itemsize) + * if tmp == NULL: # <<<<<<<<<<<<<< + * raise MemoryError + * item = tmp + */ + __pyx_t_2 = ((__pyx_v_tmp == NULL) != 0); + if (unlikely(__pyx_t_2)) { + + /* "View.MemoryView":461 + * tmp = PyMem_Malloc(self.view.itemsize) + * if tmp == NULL: + * raise MemoryError # <<<<<<<<<<<<<< + * item = tmp + * else: + */ + PyErr_NoMemory(); __PYX_ERR(1, 461, __pyx_L1_error) + + /* "View.MemoryView":460 + * if self.view.itemsize > sizeof(array): + * tmp = PyMem_Malloc(self.view.itemsize) + * if tmp == NULL: # <<<<<<<<<<<<<< + * raise MemoryError + * item = tmp + */ + } + + /* "View.MemoryView":462 + * if tmp == NULL: + * raise MemoryError + * item = tmp # <<<<<<<<<<<<<< + * else: + * item = <void *> array + */ + __pyx_v_item = __pyx_v_tmp; + + /* "View.MemoryView":458 + * dst_slice = get_slice_from_memview(dst, &tmp_slice) + * + * if self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< + * tmp = PyMem_Malloc(self.view.itemsize) + * if tmp == NULL: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":464 + * item = tmp + * else: + * item = <void *> array # <<<<<<<<<<<<<< + * + * try: + */ + /*else*/ { + __pyx_v_item = ((void *)__pyx_v_array); + } + __pyx_L3:; + + /* "View.MemoryView":466 + * item = <void *> array + * + * try: # <<<<<<<<<<<<<< + * if self.dtype_is_object: + * (<PyObject **> item)[0] = <PyObject *> value + */ + 
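/* Scalar broadcast: the value is packed once into item (the 128-int stack + * buffer, or PyMem_Malloc'd storage when view.itemsize is larger) and then + * copied over every element of the destination slice; the try/finally below + * guarantees PyMem_Free(tmp) runs even on error, and freeing a tmp that is + * still NULL is a no-op. */ + 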
/*try:*/ { + + /* "View.MemoryView":467 + * + * try: + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * (<PyObject **> item)[0] = <PyObject *> value + * else: + */ + __pyx_t_2 = (__pyx_v_self->dtype_is_object != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":468 + * try: + * if self.dtype_is_object: + * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<< + * else: + * self.assign_item_from_object(<char *> item, value) + */ + (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); + + /* "View.MemoryView":467 + * + * try: + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * (<PyObject **> item)[0] = <PyObject *> value + * else: + */ + goto __pyx_L8; + } + + /* "View.MemoryView":470 + * (<PyObject **> item)[0] = <PyObject *> value + * else: + * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< + * + * + */ + /*else*/ { + __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 470, __pyx_L6_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __pyx_L8:; + + /* "View.MemoryView":474 + * + * + * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< + * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) + * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, + */ + __pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":475 + * + * if self.view.suboffsets != NULL: + * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< + * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, + * item, self.dtype_is_object) + */ + __pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 475, __pyx_L6_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "View.MemoryView":474 + * + * + * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< + * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) + * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, + */ + } + + /* "View.MemoryView":476 + * if self.view.suboffsets != NULL: + * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) + * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< + * item, self.dtype_is_object) + * finally: + */ + __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); + } + + /* "View.MemoryView":479 + * item, self.dtype_is_object) + * finally: + * PyMem_Free(tmp) # <<<<<<<<<<<<<< + * + * cdef setitem_indexed(self, index, value): + */ + /*finally:*/ { + /*normal exit:*/{ + PyMem_Free(__pyx_v_tmp); + goto __pyx_L7; + } + __pyx_L6_error:; + /*exception exit:*/{ + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); + if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9); + __Pyx_XGOTREF(__pyx_t_7); + __Pyx_XGOTREF(__pyx_t_8); + __Pyx_XGOTREF(__pyx_t_9); + __Pyx_XGOTREF(__pyx_t_10); + __Pyx_XGOTREF(__pyx_t_11); + __Pyx_XGOTREF(__pyx_t_12); + __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename; + { + PyMem_Free(__pyx_v_tmp); + } + if 
(PY_MAJOR_VERSION >= 3) { + __Pyx_XGIVEREF(__pyx_t_10); + __Pyx_XGIVEREF(__pyx_t_11); + __Pyx_XGIVEREF(__pyx_t_12); + __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12); + } + __Pyx_XGIVEREF(__pyx_t_7); + __Pyx_XGIVEREF(__pyx_t_8); + __Pyx_XGIVEREF(__pyx_t_9); + __Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9); + __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; + __pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6; + goto __pyx_L1_error; + } + __pyx_L7:; + } + + /* "View.MemoryView":449 + * src.ndim, dst.ndim, self.dtype_is_object) + * + * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< + * cdef int array[128] + * cdef void *tmp = NULL + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":481 + * PyMem_Free(tmp) + * + * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< + * cdef char *itemp = self.get_item_pointer(index) + * self.assign_item_from_object(itemp, value) + */ + +static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { + char *__pyx_v_itemp; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + char *__pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("setitem_indexed", 0); + + /* "View.MemoryView":482 + * + * cdef setitem_indexed(self, index, value): + * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< + * self.assign_item_from_object(itemp, value) + * + */ + __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 482, __pyx_L1_error) + __pyx_v_itemp = __pyx_t_1; + + /* "View.MemoryView":483 + * cdef setitem_indexed(self, index, value): + * cdef char *itemp = self.get_item_pointer(index) + * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< + * + * cdef convert_item_to_object(self, char *itemp): + */ + __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 483, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "View.MemoryView":481 + * PyMem_Free(tmp) + * + * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< + * cdef char *itemp = self.get_item_pointer(index) + * self.assign_item_from_object(itemp, value) + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":485 + * self.assign_item_from_object(itemp, value) + * + * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to 
convert the type""" + */ + +static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { + PyObject *__pyx_v_struct = NULL; + PyObject *__pyx_v_bytesitem = 0; + PyObject *__pyx_v_result = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + int __pyx_t_8; + PyObject *__pyx_t_9 = NULL; + size_t __pyx_t_10; + int __pyx_t_11; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("convert_item_to_object", 0); + + /* "View.MemoryView":488 + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to convert the type""" + * import struct # <<<<<<<<<<<<<< + * cdef bytes bytesitem + * + */ + __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 488, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_struct = __pyx_t_1; + __pyx_t_1 = 0; + + /* "View.MemoryView":491 + * cdef bytes bytesitem + * + * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< + * try: + * result = struct.unpack(self.view.format, bytesitem) + */ + __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 491, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); + __pyx_t_1 = 0; + + /* "View.MemoryView":492 + * + * bytesitem = itemp[:self.view.itemsize] + * try: # <<<<<<<<<<<<<< + * result = struct.unpack(self.view.format, bytesitem) + * except struct.error: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_4); + /*try:*/ { + + /* "View.MemoryView":493 + * bytesitem = itemp[:self.view.itemsize] + * try: + * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< + * except struct.error: + * raise ValueError("Unable to convert item to object") + */ + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 493, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 493, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_7 = NULL; + __pyx_t_8 = 0; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { + __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); + if (likely(__pyx_t_7)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); + __Pyx_INCREF(__pyx_t_7); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_5, function); + __pyx_t_8 = 1; + } + } + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(__pyx_t_5)) { + PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + } else + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { + PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + } else + #endif + { + __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 493, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_9); + if (__pyx_t_7) { + __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; + } + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); + __Pyx_INCREF(__pyx_v_bytesitem); + __Pyx_GIVEREF(__pyx_v_bytesitem); + PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); + __pyx_t_6 = 0; + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + } + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_v_result = __pyx_t_1; + __pyx_t_1 = 0; + + /* "View.MemoryView":492 + * + * bytesitem = itemp[:self.view.itemsize] + * try: # <<<<<<<<<<<<<< + * result = struct.unpack(self.view.format, bytesitem) + * except struct.error: + */ + } + + /* "View.MemoryView":497 + * raise ValueError("Unable to convert item to object") + * else: + * if len(self.view.format) == 1: # <<<<<<<<<<<<<< + * return result[0] + * return result + */ + /*else:*/ { + __pyx_t_10 = strlen(__pyx_v_self->view.format); + __pyx_t_11 = ((__pyx_t_10 == 1) != 0); + if (__pyx_t_11) { + + /* "View.MemoryView":498 + * else: + * if len(self.view.format) == 1: + * return result[0] # <<<<<<<<<<<<<< + * return result + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 498, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L6_except_return; + + /* "View.MemoryView":497 + * raise ValueError("Unable to convert item to object") + * else: + * if len(self.view.format) == 1: # <<<<<<<<<<<<<< + * return result[0] + * return result + */ + } + + /* "View.MemoryView":499 + * if len(self.view.format) == 1: + * return result[0] + * return result # <<<<<<<<<<<<<< + * + * cdef assign_item_from_object(self, char *itemp, object value): + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_result); + __pyx_r = __pyx_v_result; + goto __pyx_L6_except_return; + } + __pyx_L3_error:; + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; + + /* "View.MemoryView":494 + * try: + * result = struct.unpack(self.view.format, bytesitem) + * except struct.error: # <<<<<<<<<<<<<< + * raise ValueError("Unable to convert item to object") + * else: + */ + __Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9); + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 494, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9); + __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0; + if (__pyx_t_8) { + __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 494, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_9); + 
__Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_1); + + /* "View.MemoryView":495 + * result = struct.unpack(self.view.format, bytesitem) + * except struct.error: + * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< + * else: + * if len(self.view.format) == 1: + */ + __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 495, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_Raise(__pyx_t_6, 0, 0, 0); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __PYX_ERR(1, 495, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "View.MemoryView":492 + * + * bytesitem = itemp[:self.view.itemsize] + * try: # <<<<<<<<<<<<<< + * result = struct.unpack(self.view.format, bytesitem) + * except struct.error: + */ + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L1_error; + __pyx_L6_except_return:; + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L0; + } + + /* "View.MemoryView":485 + * self.assign_item_from_object(itemp, value) + * + * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to convert the type""" + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_9); + __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_struct); + __Pyx_XDECREF(__pyx_v_bytesitem); + __Pyx_XDECREF(__pyx_v_result); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":501 + * return result + * + * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to convert the type""" + */ + +static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { + PyObject *__pyx_v_struct = NULL; + char __pyx_v_c; + PyObject *__pyx_v_bytesvalue = 0; + Py_ssize_t __pyx_v_i; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_t_2; + int __pyx_t_3; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + int __pyx_t_7; + PyObject *__pyx_t_8 = NULL; + Py_ssize_t __pyx_t_9; + PyObject *__pyx_t_10 = NULL; + char *__pyx_t_11; + char *__pyx_t_12; + char *__pyx_t_13; + char *__pyx_t_14; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("assign_item_from_object", 0); + + /* "View.MemoryView":504 + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to convert the type""" + * import struct # <<<<<<<<<<<<<< + * cdef char c + * cdef bytes bytesvalue + */ + __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 504, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_struct = __pyx_t_1; + __pyx_t_1 = 0; + + /* "View.MemoryView":509 + * cdef Py_ssize_t i + * + * if isinstance(value, tuple): # <<<<<<<<<<<<<< + * 
bytesvalue = struct.pack(self.view.format, *value) + * else: + */ + __pyx_t_2 = PyTuple_Check(__pyx_v_value); + __pyx_t_3 = (__pyx_t_2 != 0); + if (__pyx_t_3) { + + /* "View.MemoryView":510 + * + * if isinstance(value, tuple): + * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< + * else: + * bytesvalue = struct.pack(self.view.format, value) + */ + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 510, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 510, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); + __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 510, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 510, __pyx_L1_error) + __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); + __pyx_t_4 = 0; + + /* "View.MemoryView":509 + * cdef Py_ssize_t i + * + * if isinstance(value, tuple): # <<<<<<<<<<<<<< + * bytesvalue = struct.pack(self.view.format, *value) + * else: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":512 + * bytesvalue = struct.pack(self.view.format, *value) + * else: + * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< + * + * for i, c in enumerate(bytesvalue): + */ + /*else*/ { + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 512, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 512, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_5 = NULL; + __pyx_t_7 = 0; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { + __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); + if (likely(__pyx_t_5)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); + __Pyx_INCREF(__pyx_t_5); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_6, function); + __pyx_t_7 = 1; + } + } + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(__pyx_t_6)) { + PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; + __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + } else + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { + PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; + __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) + 
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + } else + #endif + { + __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 512, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + if (__pyx_t_5) { + __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; + } + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); + __Pyx_INCREF(__pyx_v_value); + __Pyx_GIVEREF(__pyx_v_value); + PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); + __pyx_t_1 = 0; + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + } + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 512, __pyx_L1_error) + __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); + __pyx_t_4 = 0; + } + __pyx_L3:; + + /* "View.MemoryView":514 + * bytesvalue = struct.pack(self.view.format, value) + * + * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< + * itemp[i] = c + * + */ + __pyx_t_9 = 0; + if (unlikely(__pyx_v_bytesvalue == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); + __PYX_ERR(1, 514, __pyx_L1_error) + } + __Pyx_INCREF(__pyx_v_bytesvalue); + __pyx_t_10 = __pyx_v_bytesvalue; + __pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10); + __pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10)); + for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) { + __pyx_t_11 = __pyx_t_14; + __pyx_v_c = (__pyx_t_11[0]); + + /* "View.MemoryView":515 + * + * for i, c in enumerate(bytesvalue): + * itemp[i] = c # <<<<<<<<<<<<<< + * + * @cname('getbuffer') + */ + __pyx_v_i = __pyx_t_9; + + /* "View.MemoryView":514 + * bytesvalue = struct.pack(self.view.format, value) + * + * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< + * itemp[i] = c + * + */ + __pyx_t_9 = (__pyx_t_9 + 1); + + /* "View.MemoryView":515 + * + * for i, c in enumerate(bytesvalue): + * itemp[i] = c # <<<<<<<<<<<<<< + * + * @cname('getbuffer') + */ + (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; + } + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + + /* "View.MemoryView":501 + * return result + * + * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to convert the type""" + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_XDECREF(__pyx_t_10); + __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_struct); + __Pyx_XDECREF(__pyx_v_bytesvalue); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":518 + * + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< + * if flags & PyBUF_WRITABLE and self.view.readonly: + * raise ValueError("Cannot create writable memory view from read-only memoryview") + */ + +/* Python wrapper */ +static CYTHON_UNUSED int 
__pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ +static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + Py_ssize_t *__pyx_t_4; + char *__pyx_t_5; + void *__pyx_t_6; + int __pyx_t_7; + Py_ssize_t __pyx_t_8; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + if (__pyx_v_info == NULL) { + PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); + return -1; + } + __Pyx_RefNannySetupContext("__getbuffer__", 0); + __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(__pyx_v_info->obj); + + /* "View.MemoryView":519 + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): + * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< + * raise ValueError("Cannot create writable memory view from read-only memoryview") + * + */ + __pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_2 = (__pyx_v_self->view.readonly != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L4_bool_binop_done:; + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":520 + * def __getbuffer__(self, Py_buffer *info, int flags): + * if flags & PyBUF_WRITABLE and self.view.readonly: + * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< + * + * if flags & PyBUF_ND: + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 520, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 520, __pyx_L1_error) + + /* "View.MemoryView":519 + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): + * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< + * raise ValueError("Cannot create writable memory view from read-only memoryview") + * + */ + } + + /* "View.MemoryView":522 + * raise ValueError("Cannot create writable memory view from read-only memoryview") + * + * if flags & PyBUF_ND: # <<<<<<<<<<<<<< + * info.shape = self.view.shape + * else: + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":523 + * + * if flags & PyBUF_ND: + * info.shape = self.view.shape # <<<<<<<<<<<<<< + * else: + * info.shape = NULL + */ + __pyx_t_4 = __pyx_v_self->view.shape; + __pyx_v_info->shape = __pyx_t_4; + + /* "View.MemoryView":522 + * raise ValueError("Cannot create writable memory view from read-only memoryview") + * + * if flags & PyBUF_ND: # <<<<<<<<<<<<<< + * info.shape = self.view.shape + * else: + */ + goto __pyx_L6; + } + + /* "View.MemoryView":525 + * info.shape = self.view.shape + * else: + * 
info.shape = NULL # <<<<<<<<<<<<<< + * + * if flags & PyBUF_STRIDES: + */ + /*else*/ { + __pyx_v_info->shape = NULL; + } + __pyx_L6:; + + /* "View.MemoryView":527 + * info.shape = NULL + * + * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< + * info.strides = self.view.strides + * else: + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":528 + * + * if flags & PyBUF_STRIDES: + * info.strides = self.view.strides # <<<<<<<<<<<<<< + * else: + * info.strides = NULL + */ + __pyx_t_4 = __pyx_v_self->view.strides; + __pyx_v_info->strides = __pyx_t_4; + + /* "View.MemoryView":527 + * info.shape = NULL + * + * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< + * info.strides = self.view.strides + * else: + */ + goto __pyx_L7; + } + + /* "View.MemoryView":530 + * info.strides = self.view.strides + * else: + * info.strides = NULL # <<<<<<<<<<<<<< + * + * if flags & PyBUF_INDIRECT: + */ + /*else*/ { + __pyx_v_info->strides = NULL; + } + __pyx_L7:; + + /* "View.MemoryView":532 + * info.strides = NULL + * + * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< + * info.suboffsets = self.view.suboffsets + * else: + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":533 + * + * if flags & PyBUF_INDIRECT: + * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< + * else: + * info.suboffsets = NULL + */ + __pyx_t_4 = __pyx_v_self->view.suboffsets; + __pyx_v_info->suboffsets = __pyx_t_4; + + /* "View.MemoryView":532 + * info.strides = NULL + * + * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< + * info.suboffsets = self.view.suboffsets + * else: + */ + goto __pyx_L8; + } + + /* "View.MemoryView":535 + * info.suboffsets = self.view.suboffsets + * else: + * info.suboffsets = NULL # <<<<<<<<<<<<<< + * + * if flags & PyBUF_FORMAT: + */ + /*else*/ { + __pyx_v_info->suboffsets = NULL; + } + __pyx_L8:; + + /* "View.MemoryView":537 + * info.suboffsets = NULL + * + * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< + * info.format = self.view.format + * else: + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":538 + * + * if flags & PyBUF_FORMAT: + * info.format = self.view.format # <<<<<<<<<<<<<< + * else: + * info.format = NULL + */ + __pyx_t_5 = __pyx_v_self->view.format; + __pyx_v_info->format = __pyx_t_5; + + /* "View.MemoryView":537 + * info.suboffsets = NULL + * + * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< + * info.format = self.view.format + * else: + */ + goto __pyx_L9; + } + + /* "View.MemoryView":540 + * info.format = self.view.format + * else: + * info.format = NULL # <<<<<<<<<<<<<< + * + * info.buf = self.view.buf + */ + /*else*/ { + __pyx_v_info->format = NULL; + } + __pyx_L9:; + + /* "View.MemoryView":542 + * info.format = NULL + * + * info.buf = self.view.buf # <<<<<<<<<<<<<< + * info.ndim = self.view.ndim + * info.itemsize = self.view.itemsize + */ + __pyx_t_6 = __pyx_v_self->view.buf; + __pyx_v_info->buf = __pyx_t_6; + + /* "View.MemoryView":543 + * + * info.buf = self.view.buf + * info.ndim = self.view.ndim # <<<<<<<<<<<<<< + * info.itemsize = self.view.itemsize + * info.len = self.view.len + */ + __pyx_t_7 = __pyx_v_self->view.ndim; + __pyx_v_info->ndim = __pyx_t_7; + + /* "View.MemoryView":544 + * info.buf = self.view.buf + * info.ndim = self.view.ndim + * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< + * info.len = self.view.len + * info.readonly = self.view.readonly + */ + __pyx_t_8 = __pyx_v_self->view.itemsize; + __pyx_v_info->itemsize = __pyx_t_8; + + /* 
"View.MemoryView":545 + * info.ndim = self.view.ndim + * info.itemsize = self.view.itemsize + * info.len = self.view.len # <<<<<<<<<<<<<< + * info.readonly = self.view.readonly + * info.obj = self + */ + __pyx_t_8 = __pyx_v_self->view.len; + __pyx_v_info->len = __pyx_t_8; + + /* "View.MemoryView":546 + * info.itemsize = self.view.itemsize + * info.len = self.view.len + * info.readonly = self.view.readonly # <<<<<<<<<<<<<< + * info.obj = self + * + */ + __pyx_t_1 = __pyx_v_self->view.readonly; + __pyx_v_info->readonly = __pyx_t_1; + + /* "View.MemoryView":547 + * info.len = self.view.len + * info.readonly = self.view.readonly + * info.obj = self # <<<<<<<<<<<<<< + * + * __pyx_getbuffer = capsule( &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") + */ + __Pyx_INCREF(((PyObject *)__pyx_v_self)); + __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); + __pyx_v_info->obj = ((PyObject *)__pyx_v_self); + + /* "View.MemoryView":518 + * + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< + * if flags & PyBUF_WRITABLE and self.view.readonly: + * raise ValueError("Cannot create writable memory view from read-only memoryview") + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + if (__pyx_v_info->obj != NULL) { + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; + } + goto __pyx_L2; + __pyx_L0:; + if (__pyx_v_info->obj == Py_None) { + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; + } + __pyx_L2:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":553 + * + * @property + * def T(self): # <<<<<<<<<<<<<< + * cdef _memoryviewslice result = memoryview_copy(self) + * transpose_memslice(&result.from_slice) + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_t_2; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":554 + * @property + * def T(self): + * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< + * transpose_memslice(&result.from_slice) + * return result + */ + __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 554, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 554, __pyx_L1_error) + __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); + __pyx_t_1 
= 0; + + /* "View.MemoryView":555 + * def T(self): + * cdef _memoryviewslice result = memoryview_copy(self) + * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< + * return result + * + */ + __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 555, __pyx_L1_error) + + /* "View.MemoryView":556 + * cdef _memoryviewslice result = memoryview_copy(self) + * transpose_memslice(&result.from_slice) + * return result # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_result)); + __pyx_r = ((PyObject *)__pyx_v_result); + goto __pyx_L0; + + /* "View.MemoryView":553 + * + * @property + * def T(self): # <<<<<<<<<<<<<< + * cdef _memoryviewslice result = memoryview_copy(self) + * transpose_memslice(&result.from_slice) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_result); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":559 + * + * @property + * def base(self): # <<<<<<<<<<<<<< + * return self.obj + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":560 + * @property + * def base(self): + * return self.obj # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_self->obj); + __pyx_r = __pyx_v_self->obj; + goto __pyx_L0; + + /* "View.MemoryView":559 + * + * @property + * def base(self): # <<<<<<<<<<<<<< + * return self.obj + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":563 + * + * @property + * def shape(self): # <<<<<<<<<<<<<< + * return tuple([length for length in self.view.shape[:self.view.ndim]]) + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + Py_ssize_t __pyx_v_length; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + Py_ssize_t *__pyx_t_2; + Py_ssize_t *__pyx_t_3; + Py_ssize_t 
*__pyx_t_4; + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":564 + * @property + * def shape(self): + * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 564, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); + for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { + __pyx_t_2 = __pyx_t_4; + __pyx_v_length = (__pyx_t_2[0]); + __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 564, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + } + __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_r = __pyx_t_5; + __pyx_t_5 = 0; + goto __pyx_L0; + + /* "View.MemoryView":563 + * + * @property + * def shape(self): # <<<<<<<<<<<<<< + * return tuple([length for length in self.view.shape[:self.view.ndim]]) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":567 + * + * @property + * def strides(self): # <<<<<<<<<<<<<< + * if self.view.strides == NULL: + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + Py_ssize_t __pyx_v_stride; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + Py_ssize_t *__pyx_t_3; + Py_ssize_t *__pyx_t_4; + Py_ssize_t *__pyx_t_5; + PyObject *__pyx_t_6 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":568 + * @property + * def strides(self): + * if self.view.strides == NULL: # <<<<<<<<<<<<<< + * + * raise ValueError("Buffer view does not expose strides") + */ + __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":570 + * if self.view.strides == NULL: + * + * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< + * + * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) + */ + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 570, __pyx_L1_error) + 
__Pyx_GOTREF(__pyx_t_2); + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(1, 570, __pyx_L1_error) + + /* "View.MemoryView":568 + * @property + * def strides(self): + * if self.view.strides == NULL: # <<<<<<<<<<<<<< + * + * raise ValueError("Buffer view does not expose strides") + */ + } + + /* "View.MemoryView":572 + * raise ValueError("Buffer view does not expose strides") + * + * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 572, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); + for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { + __pyx_t_3 = __pyx_t_5; + __pyx_v_stride = (__pyx_t_3[0]); + __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 572, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + } + __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_r = __pyx_t_6; + __pyx_t_6 = 0; + goto __pyx_L0; + + /* "View.MemoryView":567 + * + * @property + * def strides(self): # <<<<<<<<<<<<<< + * if self.view.strides == NULL: + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":575 + * + * @property + * def suboffsets(self): # <<<<<<<<<<<<<< + * if self.view.suboffsets == NULL: + * return (-1,) * self.view.ndim + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + Py_ssize_t __pyx_v_suboffset; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + Py_ssize_t *__pyx_t_4; + Py_ssize_t *__pyx_t_5; + Py_ssize_t *__pyx_t_6; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":576 + * @property + * def suboffsets(self): + * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< + * return (-1,) * self.view.ndim + * + */ + __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":577 + * def suboffsets(self): + * if self.view.suboffsets == NULL: + * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< + * + * return tuple([suboffset for suboffset in 
self.view.suboffsets[:self.view.ndim]]) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__13, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 577, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "View.MemoryView":576 + * @property + * def suboffsets(self): + * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< + * return (-1,) * self.view.ndim + * + */ + } + + /* "View.MemoryView":579 + * return (-1,) * self.view.ndim + * + * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 579, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); + for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { + __pyx_t_4 = __pyx_t_6; + __pyx_v_suboffset = (__pyx_t_4[0]); + __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 579, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } + __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":575 + * + * @property + * def suboffsets(self): # <<<<<<<<<<<<<< + * if self.view.suboffsets == NULL: + * return (-1,) * self.view.ndim + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":582 + * + * @property + * def ndim(self): # <<<<<<<<<<<<<< + * return self.view.ndim + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":583 + * @property + * def ndim(self): + * return self.view.ndim # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 583, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; 
+ __pyx_t_1 = 0; + goto __pyx_L0; + + /* "View.MemoryView":582 + * + * @property + * def ndim(self): # <<<<<<<<<<<<<< + * return self.view.ndim + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":586 + * + * @property + * def itemsize(self): # <<<<<<<<<<<<<< + * return self.view.itemsize + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":587 + * @property + * def itemsize(self): + * return self.view.itemsize # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 587, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "View.MemoryView":586 + * + * @property + * def itemsize(self): # <<<<<<<<<<<<<< + * return self.view.itemsize + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":590 + * + * @property + * def nbytes(self): # <<<<<<<<<<<<<< + * return self.size * self.view.itemsize + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":591 + * @property + * def nbytes(self): + * return self.size * self.view.itemsize # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + 
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 591, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 591, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 591, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "View.MemoryView":590 + * + * @property + * def nbytes(self): # <<<<<<<<<<<<<< + * return self.size * self.view.itemsize + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":594 + * + * @property + * def size(self): # <<<<<<<<<<<<<< + * if self._size is None: + * result = 1 + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_v_result = NULL; + PyObject *__pyx_v_length = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + Py_ssize_t *__pyx_t_3; + Py_ssize_t *__pyx_t_4; + Py_ssize_t *__pyx_t_5; + PyObject *__pyx_t_6 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":595 + * @property + * def size(self): + * if self._size is None: # <<<<<<<<<<<<<< + * result = 1 + * + */ + __pyx_t_1 = (__pyx_v_self->_size == Py_None); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":596 + * def size(self): + * if self._size is None: + * result = 1 # <<<<<<<<<<<<<< + * + * for length in self.view.shape[:self.view.ndim]: + */ + __Pyx_INCREF(__pyx_int_1); + __pyx_v_result = __pyx_int_1; + + /* "View.MemoryView":598 + * result = 1 + * + * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< + * result *= length + * + */ + __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); + for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { + __pyx_t_3 = __pyx_t_5; + __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 598, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); + __pyx_t_6 = 0; + + /* "View.MemoryView":599 + * + * for length in self.view.shape[:self.view.ndim]: + * result *= length # <<<<<<<<<<<<<< + * + * self._size = result + */ + __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 599, 
__pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6);
+ __pyx_t_6 = 0;
+ }
+
+ /* "View.MemoryView":601
+ * result *= length
+ *
+ * self._size = result # <<<<<<<<<<<<<<
+ *
+ * return self._size
+ */
+ __Pyx_INCREF(__pyx_v_result);
+ __Pyx_GIVEREF(__pyx_v_result);
+ __Pyx_GOTREF(__pyx_v_self->_size);
+ __Pyx_DECREF(__pyx_v_self->_size);
+ __pyx_v_self->_size = __pyx_v_result;
+
+ /* "View.MemoryView":595
+ * @property
+ * def size(self):
+ * if self._size is None: # <<<<<<<<<<<<<<
+ * result = 1
+ *
+ */
+ }
+
+ /* "View.MemoryView":603
+ * self._size = result
+ *
+ * return self._size # <<<<<<<<<<<<<<
+ *
+ * def __len__(self):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_self->_size);
+ __pyx_r = __pyx_v_self->_size;
+ goto __pyx_L0;
+
+ /* "View.MemoryView":594
+ *
+ * @property
+ * def size(self): # <<<<<<<<<<<<<<
+ * if self._size is None:
+ * result = 1
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_result);
+ __Pyx_XDECREF(__pyx_v_length);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "View.MemoryView":605
+ * return self._size
+ *
+ * def __len__(self): # <<<<<<<<<<<<<<
+ * if self.view.ndim >= 1:
+ * return self.view.shape[0]
+ */
+
+/* Python wrapper */
+static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/
+static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) {
+ Py_ssize_t __pyx_r;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
+ __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) {
+ Py_ssize_t __pyx_r;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ __Pyx_RefNannySetupContext("__len__", 0);
+
+ /* "View.MemoryView":606
+ *
+ * def __len__(self):
+ * if self.view.ndim >= 1: # <<<<<<<<<<<<<<
+ * return self.view.shape[0]
+ *
+ */
+ __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0);
+ if (__pyx_t_1) {
+
+ /* "View.MemoryView":607
+ * def __len__(self):
+ * if self.view.ndim >= 1:
+ * return self.view.shape[0] # <<<<<<<<<<<<<<
+ *
+ * return 0
+ */
+ __pyx_r = (__pyx_v_self->view.shape[0]);
+ goto __pyx_L0;
+
+ /* "View.MemoryView":606
+ *
+ * def __len__(self):
+ * if self.view.ndim >= 1: # <<<<<<<<<<<<<<
+ * return self.view.shape[0]
+ *
+ */
+ }
+
+ /* "View.MemoryView":609
+ * return self.view.shape[0]
+ *
+ * return 0 # <<<<<<<<<<<<<<
+ *
+ * def __repr__(self):
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+
+ /* "View.MemoryView":605
+ * return self._size
+ *
+ * def __len__(self): # <<<<<<<<<<<<<<
+ * if self.view.ndim >= 1:
+ * return self.view.shape[0]
+ */
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "View.MemoryView":611
+ * return 0
+ *
+ * def __repr__(self): # <<<<<<<<<<<<<<
+ * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
+ * id(self))
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
+ __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__repr__", 0);
+
+ /* "View.MemoryView":612
+ *
+ * def __repr__(self):
+ * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
+ * id(self))
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "View.MemoryView":613
+ * def __repr__(self):
+ * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
+ * id(self)) # <<<<<<<<<<<<<<
+ *
+ * def __str__(self):
+ */
+ __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 613, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+
+ /* "View.MemoryView":612
+ *
+ * def __repr__(self):
+ * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
+ * id(self))
+ *
+ */
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 612, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2);
+ __pyx_t_1 = 0;
+ __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L0;
+
+ /* "View.MemoryView":611
+ * return 0
+ *
+ * def __repr__(self): # <<<<<<<<<<<<<<
+ * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
+ * id(self))
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "View.MemoryView":615
+ * id(self))
+ *
+ * def __str__(self): # <<<<<<<<<<<<<<
+ * return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
+ *
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__str__ (wrapper)", 0);
+ __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__str__", 0);
+
+ /* "View.MemoryView":616
+ *
+ * def __str__(self):
+ * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
+ __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* "View.MemoryView":615
+ * id(self))
+ *
+ * def __str__(self): # <<<<<<<<<<<<<<
+ * return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
+ *
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "View.MemoryView":619
+ *
+ *
+ * def is_c_contig(self): # <<<<<<<<<<<<<<
+ * cdef __Pyx_memviewslice *mslice
+ * cdef __Pyx_memviewslice tmp
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0);
+ __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
+ __Pyx_memviewslice *__pyx_v_mslice;
+ __Pyx_memviewslice __pyx_v_tmp;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ __Pyx_memviewslice *__pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("is_c_contig", 0);
+
+ /* "View.MemoryView":622
+ * cdef __Pyx_memviewslice *mslice
+ * cdef __Pyx_memviewslice tmp
+ * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
+ * return slice_is_contig(mslice[0], 'C', self.view.ndim)
+ *
+ */
+ __pyx_t_1 =
__pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 622, __pyx_L1_error) + __pyx_v_mslice = __pyx_t_1; + + /* "View.MemoryView":623 + * cdef __Pyx_memviewslice tmp + * mslice = get_slice_from_memview(self, &tmp) + * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< + * + * def is_f_contig(self): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 623, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":619 + * + * + * def is_c_contig(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice *mslice + * cdef __Pyx_memviewslice tmp + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":625 + * return slice_is_contig(mslice[0], 'C', self.view.ndim) + * + * def is_f_contig(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice *mslice + * cdef __Pyx_memviewslice tmp + */ + +/* Python wrapper */ +static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { + __Pyx_memviewslice *__pyx_v_mslice; + __Pyx_memviewslice __pyx_v_tmp; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_memviewslice *__pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("is_f_contig", 0); + + /* "View.MemoryView":628 + * cdef __Pyx_memviewslice *mslice + * cdef __Pyx_memviewslice tmp + * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< + * return slice_is_contig(mslice[0], 'F', self.view.ndim) + * + */ + __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 628, __pyx_L1_error) + __pyx_v_mslice = __pyx_t_1; + + /* "View.MemoryView":629 + * cdef __Pyx_memviewslice tmp + * mslice = get_slice_from_memview(self, &tmp) + * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< + * + * def copy(self): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 629, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":625 + * return slice_is_contig(mslice[0], 'C', self.view.ndim) + * + * def is_f_contig(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice *mslice + * cdef __Pyx_memviewslice tmp + */ + 
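+ /* The copy()/copy_fortran() methods that follow materialize a contiguous copy: they mask the opposite contiguity bit out of self.flags, or the requested one back in, and delegate to slice_copy_contig plus memoryview_copy_from_slice. Sketch of the flag arithmetic from the quoted source: + * + * flags = self.flags & ~PyBUF_F_CONTIGUOUS # drop any Fortran requirement + * flags |= PyBUF_C_CONTIGUOUS # then request C order for the copy + */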
+ /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":631 + * return slice_is_contig(mslice[0], 'F', self.view.ndim) + * + * def copy(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice mslice + * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS + */ + +/* Python wrapper */ +static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("copy (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { + __Pyx_memviewslice __pyx_v_mslice; + int __pyx_v_flags; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_memviewslice __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("copy", 0); + + /* "View.MemoryView":633 + * def copy(self): + * cdef __Pyx_memviewslice mslice + * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< + * + * slice_copy(self, &mslice) + */ + __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); + + /* "View.MemoryView":635 + * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS + * + * slice_copy(self, &mslice) # <<<<<<<<<<<<<< + * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, + * self.view.itemsize, + */ + __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); + + /* "View.MemoryView":636 + * + * slice_copy(self, &mslice) + * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< + * self.view.itemsize, + * flags|PyBUF_C_CONTIGUOUS, + */ + __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 636, __pyx_L1_error) + __pyx_v_mslice = __pyx_t_1; + + /* "View.MemoryView":641 + * self.dtype_is_object) + * + * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< + * + * def copy_fortran(self): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 641, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":631 + * return slice_is_contig(mslice[0], 'F', self.view.ndim) + * + * def copy(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice mslice + * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":643 + * return 
memoryview_copy_from_slice(self, &mslice) + * + * def copy_fortran(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice src, dst + * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS + */ + +/* Python wrapper */ +static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { + __Pyx_memviewslice __pyx_v_src; + __Pyx_memviewslice __pyx_v_dst; + int __pyx_v_flags; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_memviewslice __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("copy_fortran", 0); + + /* "View.MemoryView":645 + * def copy_fortran(self): + * cdef __Pyx_memviewslice src, dst + * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< + * + * slice_copy(self, &src) + */ + __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); + + /* "View.MemoryView":647 + * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS + * + * slice_copy(self, &src) # <<<<<<<<<<<<<< + * dst = slice_copy_contig(&src, "fortran", self.view.ndim, + * self.view.itemsize, + */ + __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); + + /* "View.MemoryView":648 + * + * slice_copy(self, &src) + * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< + * self.view.itemsize, + * flags|PyBUF_F_CONTIGUOUS, + */ + __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 648, __pyx_L1_error) + __pyx_v_dst = __pyx_t_1; + + /* "View.MemoryView":653 + * self.dtype_is_object) + * + * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 653, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":643 + * return memoryview_copy_from_slice(self, &mslice) + * + * def copy_fortran(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice src, dst + * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject 
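+ /* copy_fortran() mirrors copy(): PyBUF_C_CONTIGUOUS is masked out and PyBUF_F_CONTIGUOUS requested, yielding a column-major result. Hypothetical Cython-level usage (illustrative only; the name view is assumed): + * + * cdef int[:, ::1] c = view.copy() # C-contiguous copy + * cdef int[::1, :] f = view.copy_fortran() # Fortran-contiguous copy + */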
*unused); /*proto*/ +static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__reduce_cython__", 0); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 2, __pyx_L1_error) + + /* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ +static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setstate_cython__", 0); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) + 
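+ /* __reduce_cython__/__setstate_cython__ exist only to make pickling fail loudly: memoryview has a non-trivial __cinit__ (it must re-acquire a buffer), so Cython cannot synthesize a default __reduce__, and both methods raise TypeError exactly as the tree-fragment source shows: + * + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */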
__Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 4, __pyx_L1_error) + + /* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":657 + * + * @cname('__pyx_memoryview_new') + * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< + * cdef memoryview result = memoryview(o, flags, dtype_is_object) + * result.typeinfo = typeinfo + */ + +static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { + struct __pyx_memoryview_obj *__pyx_v_result = 0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); + + /* "View.MemoryView":658 + * @cname('__pyx_memoryview_new') + * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): + * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< + * result.typeinfo = typeinfo + * return result + */ + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 658, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 658, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(__pyx_v_o); + __Pyx_GIVEREF(__pyx_v_o); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_2); + PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); + __pyx_t_1 = 0; + __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); + __pyx_t_2 = 0; + + /* "View.MemoryView":659 + * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): + * cdef memoryview result = memoryview(o, flags, dtype_is_object) + * result.typeinfo = typeinfo # <<<<<<<<<<<<<< + * return result + * + */ + __pyx_v_result->typeinfo = __pyx_v_typeinfo; + + /* "View.MemoryView":660 + * cdef memoryview result = memoryview(o, flags, dtype_is_object) + * result.typeinfo = typeinfo + * return result # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_check') + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_result)); + __pyx_r = ((PyObject *)__pyx_v_result); + goto __pyx_L0; + + /* "View.MemoryView":657 + * + * @cname('__pyx_memoryview_new') + * cdef memoryview_cwrapper(object o, int flags, bint 
dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< + * cdef memoryview result = memoryview(o, flags, dtype_is_object) + * result.typeinfo = typeinfo + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_result); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":663 + * + * @cname('__pyx_memoryview_check') + * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< + * return isinstance(o, memoryview) + * + */ + +static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("memoryview_check", 0); + + /* "View.MemoryView":664 + * @cname('__pyx_memoryview_check') + * cdef inline bint memoryview_check(object o): + * return isinstance(o, memoryview) # <<<<<<<<<<<<<< + * + * cdef tuple _unellipsify(object index, int ndim): + */ + __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); + __pyx_r = __pyx_t_1; + goto __pyx_L0; + + /* "View.MemoryView":663 + * + * @cname('__pyx_memoryview_check') + * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< + * return isinstance(o, memoryview) + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":666 + * return isinstance(o, memoryview) + * + * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< + * """ + * Replace all ellipses with full slices and fill incomplete indices with + */ + +static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { + PyObject *__pyx_v_tup = NULL; + PyObject *__pyx_v_result = NULL; + int __pyx_v_have_slices; + int __pyx_v_seen_ellipsis; + CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; + PyObject *__pyx_v_item = NULL; + Py_ssize_t __pyx_v_nslices; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + Py_ssize_t __pyx_t_5; + PyObject *(*__pyx_t_6)(PyObject *); + PyObject *__pyx_t_7 = NULL; + Py_ssize_t __pyx_t_8; + int __pyx_t_9; + int __pyx_t_10; + PyObject *__pyx_t_11 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("_unellipsify", 0); + + /* "View.MemoryView":671 + * full slices. + * """ + * if not isinstance(index, tuple): # <<<<<<<<<<<<<< + * tup = (index,) + * else: + */ + __pyx_t_1 = PyTuple_Check(__pyx_v_index); + __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":672 + * """ + * if not isinstance(index, tuple): + * tup = (index,) # <<<<<<<<<<<<<< + * else: + * tup = index + */ + __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 672, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(__pyx_v_index); + __Pyx_GIVEREF(__pyx_v_index); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); + __pyx_v_tup = __pyx_t_3; + __pyx_t_3 = 0; + + /* "View.MemoryView":671 + * full slices. 
+ * """ + * if not isinstance(index, tuple): # <<<<<<<<<<<<<< + * tup = (index,) + * else: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":674 + * tup = (index,) + * else: + * tup = index # <<<<<<<<<<<<<< + * + * result = [] + */ + /*else*/ { + __Pyx_INCREF(__pyx_v_index); + __pyx_v_tup = __pyx_v_index; + } + __pyx_L3:; + + /* "View.MemoryView":676 + * tup = index + * + * result = [] # <<<<<<<<<<<<<< + * have_slices = False + * seen_ellipsis = False + */ + __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 676, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_v_result = ((PyObject*)__pyx_t_3); + __pyx_t_3 = 0; + + /* "View.MemoryView":677 + * + * result = [] + * have_slices = False # <<<<<<<<<<<<<< + * seen_ellipsis = False + * for idx, item in enumerate(tup): + */ + __pyx_v_have_slices = 0; + + /* "View.MemoryView":678 + * result = [] + * have_slices = False + * seen_ellipsis = False # <<<<<<<<<<<<<< + * for idx, item in enumerate(tup): + * if item is Ellipsis: + */ + __pyx_v_seen_ellipsis = 0; + + /* "View.MemoryView":679 + * have_slices = False + * seen_ellipsis = False + * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< + * if item is Ellipsis: + * if not seen_ellipsis: + */ + __Pyx_INCREF(__pyx_int_0); + __pyx_t_3 = __pyx_int_0; + if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { + __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; + __pyx_t_6 = NULL; + } else { + __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 679, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 679, __pyx_L1_error) + } + for (;;) { + if (likely(!__pyx_t_6)) { + if (likely(PyList_CheckExact(__pyx_t_4))) { + if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) + #else + __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + #endif + } else { + if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) + #else + __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + #endif + } + } else { + __pyx_t_7 = __pyx_t_6(__pyx_t_4); + if (unlikely(!__pyx_t_7)) { + PyObject* exc_type = PyErr_Occurred(); + if (exc_type) { + if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); + else __PYX_ERR(1, 679, __pyx_L1_error) + } + break; + } + __Pyx_GOTREF(__pyx_t_7); + } + __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); + __pyx_t_7 = 0; + __Pyx_INCREF(__pyx_t_3); + __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); + __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_DECREF(__pyx_t_3); + __pyx_t_3 = __pyx_t_7; + __pyx_t_7 = 0; + + /* "View.MemoryView":680 + * seen_ellipsis = False + * for idx, item in enumerate(tup): + * if item is Ellipsis: # <<<<<<<<<<<<<< + * if not seen_ellipsis: + * 
result.extend([slice(None)] * (ndim - len(tup) + 1)) + */ + __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); + __pyx_t_1 = (__pyx_t_2 != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":681 + * for idx, item in enumerate(tup): + * if item is Ellipsis: + * if not seen_ellipsis: # <<<<<<<<<<<<<< + * result.extend([slice(None)] * (ndim - len(tup) + 1)) + * seen_ellipsis = True + */ + __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":682 + * if item is Ellipsis: + * if not seen_ellipsis: + * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< + * seen_ellipsis = True + * else: + */ + __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 682, __pyx_L1_error) + __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 682, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + { Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { + __Pyx_INCREF(__pyx_slice__16); + __Pyx_GIVEREF(__pyx_slice__16); + PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__16); + } + } + __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 682, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + + /* "View.MemoryView":683 + * if not seen_ellipsis: + * result.extend([slice(None)] * (ndim - len(tup) + 1)) + * seen_ellipsis = True # <<<<<<<<<<<<<< + * else: + * result.append(slice(None)) + */ + __pyx_v_seen_ellipsis = 1; + + /* "View.MemoryView":681 + * for idx, item in enumerate(tup): + * if item is Ellipsis: + * if not seen_ellipsis: # <<<<<<<<<<<<<< + * result.extend([slice(None)] * (ndim - len(tup) + 1)) + * seen_ellipsis = True + */ + goto __pyx_L7; + } + + /* "View.MemoryView":685 + * seen_ellipsis = True + * else: + * result.append(slice(None)) # <<<<<<<<<<<<<< + * have_slices = True + * else: + */ + /*else*/ { + __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__16); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 685, __pyx_L1_error) + } + __pyx_L7:; + + /* "View.MemoryView":686 + * else: + * result.append(slice(None)) + * have_slices = True # <<<<<<<<<<<<<< + * else: + * if not isinstance(item, slice) and not PyIndex_Check(item): + */ + __pyx_v_have_slices = 1; + + /* "View.MemoryView":680 + * seen_ellipsis = False + * for idx, item in enumerate(tup): + * if item is Ellipsis: # <<<<<<<<<<<<<< + * if not seen_ellipsis: + * result.extend([slice(None)] * (ndim - len(tup) + 1)) + */ + goto __pyx_L6; + } + + /* "View.MemoryView":688 + * have_slices = True + * else: + * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< + * raise TypeError("Cannot index with type '%s'" % type(item)) + * + */ + /*else*/ { + __pyx_t_2 = PySlice_Check(__pyx_v_item); + __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); + if (__pyx_t_10) { + } else { + __pyx_t_1 = __pyx_t_10; + goto __pyx_L9_bool_binop_done; + } + __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); + __pyx_t_1 = __pyx_t_10; + __pyx_L9_bool_binop_done:; + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":689 + * else: + * if not isinstance(item, slice) and not PyIndex_Check(item): + * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< + * + * have_slices = have_slices or isinstance(item, slice) + */ + __pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject 
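+ /* _unellipsify normalizes an index expression before slicing. A Python sketch assembled from the quoted source lines (isinstance(item, int) stands in for the C-level PyIndex_Check): + * + * def _unellipsify(index, ndim): + * tup = index if isinstance(index, tuple) else (index,) + * result, have_slices, seen_ellipsis = [], False, False + * for item in tup: + * if item is Ellipsis: + * if not seen_ellipsis: + * result.extend([slice(None)] * (ndim - len(tup) + 1)) + * seen_ellipsis = True + * else: + * result.append(slice(None)) + * have_slices = True + * else: + * if not isinstance(item, (slice, int)): + * raise TypeError("Cannot index with type '%s'" % type(item)) + * have_slices = have_slices or isinstance(item, slice) + * result.append(item) + * nslices = ndim - len(result) + * if nslices: + * result.extend([slice(None)] * nslices) + * return have_slices or nslices, tuple(result) + */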
*)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 689, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 689, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_11); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_Raise(__pyx_t_11, 0, 0, 0); + __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; + __PYX_ERR(1, 689, __pyx_L1_error) + + /* "View.MemoryView":688 + * have_slices = True + * else: + * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< + * raise TypeError("Cannot index with type '%s'" % type(item)) + * + */ + } + + /* "View.MemoryView":691 + * raise TypeError("Cannot index with type '%s'" % type(item)) + * + * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< + * result.append(item) + * + */ + __pyx_t_10 = (__pyx_v_have_slices != 0); + if (!__pyx_t_10) { + } else { + __pyx_t_1 = __pyx_t_10; + goto __pyx_L11_bool_binop_done; + } + __pyx_t_10 = PySlice_Check(__pyx_v_item); + __pyx_t_2 = (__pyx_t_10 != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L11_bool_binop_done:; + __pyx_v_have_slices = __pyx_t_1; + + /* "View.MemoryView":692 + * + * have_slices = have_slices or isinstance(item, slice) + * result.append(item) # <<<<<<<<<<<<<< + * + * nslices = ndim - len(result) + */ + __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 692, __pyx_L1_error) + } + __pyx_L6:; + + /* "View.MemoryView":679 + * have_slices = False + * seen_ellipsis = False + * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< + * if item is Ellipsis: + * if not seen_ellipsis: + */ + } + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "View.MemoryView":694 + * result.append(item) + * + * nslices = ndim - len(result) # <<<<<<<<<<<<<< + * if nslices: + * result.extend([slice(None)] * nslices) + */ + __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 694, __pyx_L1_error) + __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); + + /* "View.MemoryView":695 + * + * nslices = ndim - len(result) + * if nslices: # <<<<<<<<<<<<<< + * result.extend([slice(None)] * nslices) + * + */ + __pyx_t_1 = (__pyx_v_nslices != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":696 + * nslices = ndim - len(result) + * if nslices: + * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< + * + * return have_slices or nslices, tuple(result) + */ + __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 696, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + { Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { + __Pyx_INCREF(__pyx_slice__16); + __Pyx_GIVEREF(__pyx_slice__16); + PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__16); + } + } + __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 696, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "View.MemoryView":695 + * + * nslices = ndim - len(result) + * if nslices: # <<<<<<<<<<<<<< + * result.extend([slice(None)] * nslices) + * + */ + } + + /* "View.MemoryView":698 + * result.extend([slice(None)] * nslices) + * + * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< + * + * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): + */ + __Pyx_XDECREF(__pyx_r); + if (!__pyx_v_have_slices) { + } else { + __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = __pyx_t_4; + __pyx_t_4 = 0; + goto __pyx_L14_bool_binop_done; + } + __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = __pyx_t_4; + __pyx_t_4 = 0; + __pyx_L14_bool_binop_done:; + __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 698, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_11); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3); + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4); + __pyx_t_3 = 0; + __pyx_t_4 = 0; + __pyx_r = ((PyObject*)__pyx_t_11); + __pyx_t_11 = 0; + goto __pyx_L0; + + /* "View.MemoryView":666 + * return isinstance(o, memoryview) + * + * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< + * """ + * Replace all ellipses with full slices and fill incomplete indices with + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_11); + __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_tup); + __Pyx_XDECREF(__pyx_v_result); + __Pyx_XDECREF(__pyx_v_idx); + __Pyx_XDECREF(__pyx_v_item); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":700 + * return have_slices or nslices, tuple(result) + * + * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: + */ + +static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { + Py_ssize_t __pyx_v_suboffset; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + Py_ssize_t *__pyx_t_1; + Py_ssize_t *__pyx_t_2; + Py_ssize_t *__pyx_t_3; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); + + /* "View.MemoryView":701 + * + * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): + * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< + * if suboffset >= 0: + * raise ValueError("Indirect dimensions not supported") + */ + 
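+ /* suboffsets come from PEP 3118: a dimension with suboffset >= 0 is indirect (each element is reached through a pointer, as in PIL-style buffers). The slicing path below supports only direct layouts, hence this guard, which compiles the quoted loop: + * + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: + * raise ValueError("Indirect dimensions not supported") + */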
__pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); + for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { + __pyx_t_1 = __pyx_t_3; + __pyx_v_suboffset = (__pyx_t_1[0]); + + /* "View.MemoryView":702 + * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: # <<<<<<<<<<<<<< + * raise ValueError("Indirect dimensions not supported") + * + */ + __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); + if (unlikely(__pyx_t_4)) { + + /* "View.MemoryView":703 + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: + * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 703, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_Raise(__pyx_t_5, 0, 0, 0); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __PYX_ERR(1, 703, __pyx_L1_error) + + /* "View.MemoryView":702 + * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: # <<<<<<<<<<<<<< + * raise ValueError("Indirect dimensions not supported") + * + */ + } + } + + /* "View.MemoryView":700 + * return have_slices or nslices, tuple(result) + * + * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":710 + * + * @cname('__pyx_memview_slice') + * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< + * cdef int new_ndim = 0, suboffset_dim = -1, dim + * cdef bint negative_step + */ + +static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { + int __pyx_v_new_ndim; + int __pyx_v_suboffset_dim; + int __pyx_v_dim; + __Pyx_memviewslice __pyx_v_src; + __Pyx_memviewslice __pyx_v_dst; + __Pyx_memviewslice *__pyx_v_p_src; + struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; + __Pyx_memviewslice *__pyx_v_p_dst; + int *__pyx_v_p_suboffset_dim; + Py_ssize_t __pyx_v_start; + Py_ssize_t __pyx_v_stop; + Py_ssize_t __pyx_v_step; + int __pyx_v_have_start; + int __pyx_v_have_stop; + int __pyx_v_have_step; + PyObject *__pyx_v_index = NULL; + struct __pyx_memoryview_obj *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + struct __pyx_memoryview_obj *__pyx_t_4; + char *__pyx_t_5; + int __pyx_t_6; + Py_ssize_t __pyx_t_7; + PyObject *(*__pyx_t_8)(PyObject *); + PyObject *__pyx_t_9 = NULL; + Py_ssize_t __pyx_t_10; + int __pyx_t_11; + Py_ssize_t __pyx_t_12; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("memview_slice", 0); + + /* "View.MemoryView":711 + * @cname('__pyx_memview_slice') + * cdef memoryview memview_slice(memoryview memview, object indices): + * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< + * cdef bint negative_step + * cdef __Pyx_memviewslice src, dst + */ + __pyx_v_new_ndim = 0; + __pyx_v_suboffset_dim = -1; + + /* 
"View.MemoryView":718 + * + * + * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< + * + * cdef _memoryviewslice memviewsliceobj + */ + (void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)))); + + /* "View.MemoryView":722 + * cdef _memoryviewslice memviewsliceobj + * + * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< + * + * if isinstance(memview, _memoryviewslice): + */ + #ifndef CYTHON_WITHOUT_ASSERTIONS + if (unlikely(!Py_OptimizeFlag)) { + if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { + PyErr_SetNone(PyExc_AssertionError); + __PYX_ERR(1, 722, __pyx_L1_error) + } + } + #endif + + /* "View.MemoryView":724 + * assert memview.view.ndim > 0 + * + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * memviewsliceobj = memview + * p_src = &memviewsliceobj.from_slice + */ + __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":725 + * + * if isinstance(memview, _memoryviewslice): + * memviewsliceobj = memview # <<<<<<<<<<<<<< + * p_src = &memviewsliceobj.from_slice + * else: + */ + if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 725, __pyx_L1_error) + __pyx_t_3 = ((PyObject *)__pyx_v_memview); + __Pyx_INCREF(__pyx_t_3); + __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); + __pyx_t_3 = 0; + + /* "View.MemoryView":726 + * if isinstance(memview, _memoryviewslice): + * memviewsliceobj = memview + * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< + * else: + * slice_copy(memview, &src) + */ + __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); + + /* "View.MemoryView":724 + * assert memview.view.ndim > 0 + * + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * memviewsliceobj = memview + * p_src = &memviewsliceobj.from_slice + */ + goto __pyx_L3; + } + + /* "View.MemoryView":728 + * p_src = &memviewsliceobj.from_slice + * else: + * slice_copy(memview, &src) # <<<<<<<<<<<<<< + * p_src = &src + * + */ + /*else*/ { + __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); + + /* "View.MemoryView":729 + * else: + * slice_copy(memview, &src) + * p_src = &src # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_p_src = (&__pyx_v_src); + } + __pyx_L3:; + + /* "View.MemoryView":735 + * + * + * dst.memview = p_src.memview # <<<<<<<<<<<<<< + * dst.data = p_src.data + * + */ + __pyx_t_4 = __pyx_v_p_src->memview; + __pyx_v_dst.memview = __pyx_t_4; + + /* "View.MemoryView":736 + * + * dst.memview = p_src.memview + * dst.data = p_src.data # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_5 = __pyx_v_p_src->data; + __pyx_v_dst.data = __pyx_t_5; + + /* "View.MemoryView":741 + * + * + * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< + * cdef int *p_suboffset_dim = &suboffset_dim + * cdef Py_ssize_t start, stop, step + */ + __pyx_v_p_dst = (&__pyx_v_dst); + + /* "View.MemoryView":742 + * + * cdef __Pyx_memviewslice *p_dst = &dst + * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< + * cdef Py_ssize_t start, stop, step + * cdef bint have_start, have_stop, have_step + */ + __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); + + /* "View.MemoryView":746 + * cdef bint have_start, have_stop, have_step + * + * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< + * if PyIndex_Check(index): + * slice_memviewslice( + */ + __pyx_t_6 = 0; + if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { + 
__pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; + __pyx_t_8 = NULL; + } else { + __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 746, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 746, __pyx_L1_error) + } + for (;;) { + if (likely(!__pyx_t_8)) { + if (likely(PyList_CheckExact(__pyx_t_3))) { + if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) + #else + __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + #endif + } else { + if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) + #else + __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + #endif + } + } else { + __pyx_t_9 = __pyx_t_8(__pyx_t_3); + if (unlikely(!__pyx_t_9)) { + PyObject* exc_type = PyErr_Occurred(); + if (exc_type) { + if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); + else __PYX_ERR(1, 746, __pyx_L1_error) + } + break; + } + __Pyx_GOTREF(__pyx_t_9); + } + __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); + __pyx_t_9 = 0; + __pyx_v_dim = __pyx_t_6; + __pyx_t_6 = (__pyx_t_6 + 1); + + /* "View.MemoryView":747 + * + * for dim, index in enumerate(indices): + * if PyIndex_Check(index): # <<<<<<<<<<<<<< + * slice_memviewslice( + * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], + */ + __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":751 + * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], + * dim, new_ndim, p_suboffset_dim, + * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< + * 0, 0, 0, # have_{start,stop,step} + * False) + */ + __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 751, __pyx_L1_error) + + /* "View.MemoryView":748 + * for dim, index in enumerate(indices): + * if PyIndex_Check(index): + * slice_memviewslice( # <<<<<<<<<<<<<< + * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], + * dim, new_ndim, p_suboffset_dim, + */ + __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 748, __pyx_L1_error) + + /* "View.MemoryView":747 + * + * for dim, index in enumerate(indices): + * if PyIndex_Check(index): # <<<<<<<<<<<<<< + * slice_memviewslice( + * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], + */ + goto __pyx_L6; + } + + /* "View.MemoryView":754 + * 0, 0, 0, # have_{start,stop,step} + * False) + * elif index is None: # <<<<<<<<<<<<<< + * p_dst.shape[new_ndim] = 1 + * p_dst.strides[new_ndim] = 0 + */ + __pyx_t_2 = (__pyx_v_index == Py_None); + __pyx_t_1 = 
(__pyx_t_2 != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":755 + * False) + * elif index is None: + * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< + * p_dst.strides[new_ndim] = 0 + * p_dst.suboffsets[new_ndim] = -1 + */ + (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; + + /* "View.MemoryView":756 + * elif index is None: + * p_dst.shape[new_ndim] = 1 + * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< + * p_dst.suboffsets[new_ndim] = -1 + * new_ndim += 1 + */ + (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; + + /* "View.MemoryView":757 + * p_dst.shape[new_ndim] = 1 + * p_dst.strides[new_ndim] = 0 + * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< + * new_ndim += 1 + * else: + */ + (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; + + /* "View.MemoryView":758 + * p_dst.strides[new_ndim] = 0 + * p_dst.suboffsets[new_ndim] = -1 + * new_ndim += 1 # <<<<<<<<<<<<<< + * else: + * start = index.start or 0 + */ + __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); + + /* "View.MemoryView":754 + * 0, 0, 0, # have_{start,stop,step} + * False) + * elif index is None: # <<<<<<<<<<<<<< + * p_dst.shape[new_ndim] = 1 + * p_dst.strides[new_ndim] = 0 + */ + goto __pyx_L6; + } + + /* "View.MemoryView":760 + * new_ndim += 1 + * else: + * start = index.start or 0 # <<<<<<<<<<<<<< + * stop = index.stop or 0 + * step = index.step or 0 + */ + /*else*/ { + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 760, __pyx_L1_error) + if (!__pyx_t_1) { + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + } else { + __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 760, __pyx_L1_error) + __pyx_t_10 = __pyx_t_12; + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + goto __pyx_L7_bool_binop_done; + } + __pyx_t_10 = 0; + __pyx_L7_bool_binop_done:; + __pyx_v_start = __pyx_t_10; + + /* "View.MemoryView":761 + * else: + * start = index.start or 0 + * stop = index.stop or 0 # <<<<<<<<<<<<<< + * step = index.step or 0 + * + */ + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 761, __pyx_L1_error) + if (!__pyx_t_1) { + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + } else { + __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 761, __pyx_L1_error) + __pyx_t_10 = __pyx_t_12; + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + goto __pyx_L9_bool_binop_done; + } + __pyx_t_10 = 0; + __pyx_L9_bool_binop_done:; + __pyx_v_stop = __pyx_t_10; + + /* "View.MemoryView":762 + * start = index.start or 0 + * stop = index.stop or 0 + * step = index.step or 0 # <<<<<<<<<<<<<< + * + * have_start = index.start is not None + */ + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 762, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 762, __pyx_L1_error) + if (!__pyx_t_1) { + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + } else { + __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 762, __pyx_L1_error) + __pyx_t_10 = __pyx_t_12; + 
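+ /* Note the two-step decoding of slice attributes: start = index.start or 0 gives the C variable a placeholder when the attribute is None, while the separate have_start/have_stop/have_step booleans (computed just below as "is not None") record whether a bound was actually given; slice_memviewslice only trusts a value whose have_* flag is set. Sketch: + * + * start, have_start = (index.start or 0), (index.start is not None) + */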
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + goto __pyx_L11_bool_binop_done; + } + __pyx_t_10 = 0; + __pyx_L11_bool_binop_done:; + __pyx_v_step = __pyx_t_10; + + /* "View.MemoryView":764 + * step = index.step or 0 + * + * have_start = index.start is not None # <<<<<<<<<<<<<< + * have_stop = index.stop is not None + * have_step = index.step is not None + */ + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 764, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_1 = (__pyx_t_9 != Py_None); + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + __pyx_v_have_start = __pyx_t_1; + + /* "View.MemoryView":765 + * + * have_start = index.start is not None + * have_stop = index.stop is not None # <<<<<<<<<<<<<< + * have_step = index.step is not None + * + */ + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 765, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_1 = (__pyx_t_9 != Py_None); + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + __pyx_v_have_stop = __pyx_t_1; + + /* "View.MemoryView":766 + * have_start = index.start is not None + * have_stop = index.stop is not None + * have_step = index.step is not None # <<<<<<<<<<<<<< + * + * slice_memviewslice( + */ + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 766, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_1 = (__pyx_t_9 != Py_None); + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + __pyx_v_have_step = __pyx_t_1; + + /* "View.MemoryView":768 + * have_step = index.step is not None + * + * slice_memviewslice( # <<<<<<<<<<<<<< + * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], + * dim, new_ndim, p_suboffset_dim, + */ + __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 768, __pyx_L1_error) + + /* "View.MemoryView":774 + * have_start, have_stop, have_step, + * True) + * new_ndim += 1 # <<<<<<<<<<<<<< + * + * if isinstance(memview, _memoryviewslice): + */ + __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); + } + __pyx_L6:; + + /* "View.MemoryView":746 + * cdef bint have_start, have_stop, have_step + * + * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< + * if PyIndex_Check(index): + * slice_memviewslice( + */ + } + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "View.MemoryView":776 + * new_ndim += 1 + * + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * return memoryview_fromslice(dst, new_ndim, + * memviewsliceobj.to_object_func, + */ + __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":777 + * + * if isinstance(memview, _memoryviewslice): + * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< + * memviewsliceobj.to_object_func, + * memviewsliceobj.to_dtype_func, + */ + __Pyx_XDECREF(((PyObject *)__pyx_r)); + + /* "View.MemoryView":778 + * if isinstance(memview, _memoryviewslice): + * return memoryview_fromslice(dst, new_ndim, + * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< + * memviewsliceobj.to_dtype_func, + * memview.dtype_is_object) + */ + if (unlikely(!__pyx_v_memviewsliceobj)) 
{ __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 778, __pyx_L1_error) } + + /* "View.MemoryView":779 + * return memoryview_fromslice(dst, new_ndim, + * memviewsliceobj.to_object_func, + * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< + * memview.dtype_is_object) + * else: + */ + if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 779, __pyx_L1_error) } + + /* "View.MemoryView":777 + * + * if isinstance(memview, _memoryviewslice): + * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< + * memviewsliceobj.to_object_func, + * memviewsliceobj.to_dtype_func, + */ + __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 777, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 777, __pyx_L1_error) + __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "View.MemoryView":776 + * new_ndim += 1 + * + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * return memoryview_fromslice(dst, new_ndim, + * memviewsliceobj.to_object_func, + */ + } + + /* "View.MemoryView":782 + * memview.dtype_is_object) + * else: + * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< + * memview.dtype_is_object) + * + */ + /*else*/ { + __Pyx_XDECREF(((PyObject *)__pyx_r)); + + /* "View.MemoryView":783 + * else: + * return memoryview_fromslice(dst, new_ndim, NULL, NULL, + * memview.dtype_is_object) # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 782, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + + /* "View.MemoryView":782 + * memview.dtype_is_object) + * else: + * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< + * memview.dtype_is_object) + * + */ + if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 782, __pyx_L1_error) + __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); + __pyx_t_3 = 0; + goto __pyx_L0; + } + + /* "View.MemoryView":710 + * + * @cname('__pyx_memview_slice') + * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< + * cdef int new_ndim = 0, suboffset_dim = -1, dim + * cdef bint negative_step + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_9); + __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); + __Pyx_XDECREF(__pyx_v_index); + __Pyx_XGIVEREF((PyObject *)__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":807 + * + * @cname('__pyx_memoryview_slice_memviewslice') + * cdef int slice_memviewslice( # <<<<<<<<<<<<<< + * __Pyx_memviewslice *dst, + * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, + */ + +static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int 
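+ /* slice_memviewslice handles one dimension per call: with is_slice=0 it bounds-checks a single integer index, with is_slice=1 it normalizes start/stop/step and writes the dimension's new shape/stride/suboffset into *dst. It returns 0 on success and -1 after setting a Python error, which is why every call site checks == -1. */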
__pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { + Py_ssize_t __pyx_v_new_shape; + int __pyx_v_negative_step; + int __pyx_r; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + + /* "View.MemoryView":827 + * cdef bint negative_step + * + * if not is_slice: # <<<<<<<<<<<<<< + * + * if start < 0: + */ + __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":829 + * if not is_slice: + * + * if start < 0: # <<<<<<<<<<<<<< + * start += shape + * if not 0 <= start < shape: + */ + __pyx_t_1 = ((__pyx_v_start < 0) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":830 + * + * if start < 0: + * start += shape # <<<<<<<<<<<<<< + * if not 0 <= start < shape: + * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) + */ + __pyx_v_start = (__pyx_v_start + __pyx_v_shape); + + /* "View.MemoryView":829 + * if not is_slice: + * + * if start < 0: # <<<<<<<<<<<<<< + * start += shape + * if not 0 <= start < shape: + */ + } + + /* "View.MemoryView":831 + * if start < 0: + * start += shape + * if not 0 <= start < shape: # <<<<<<<<<<<<<< + * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) + * else: + */ + __pyx_t_1 = (0 <= __pyx_v_start); + if (__pyx_t_1) { + __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); + } + __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":832 + * start += shape + * if not 0 <= start < shape: + * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< + * else: + * + */ + __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 832, __pyx_L1_error) + + /* "View.MemoryView":831 + * if start < 0: + * start += shape + * if not 0 <= start < shape: # <<<<<<<<<<<<<< + * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) + * else: + */ + } + + /* "View.MemoryView":827 + * cdef bint negative_step + * + * if not is_slice: # <<<<<<<<<<<<<< + * + * if start < 0: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":835 + * else: + * + * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< + * + * if have_step and step == 0: + */ + /*else*/ { + __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); + if (__pyx_t_1) { + } else { + __pyx_t_2 = __pyx_t_1; + goto __pyx_L6_bool_binop_done; + } + __pyx_t_1 = ((__pyx_v_step < 0) != 0); + __pyx_t_2 = __pyx_t_1; + __pyx_L6_bool_binop_done:; + __pyx_v_negative_step = __pyx_t_2; + + /* "View.MemoryView":837 + * negative_step = have_step != 0 and step < 0 + * + * if have_step and step == 0: # <<<<<<<<<<<<<< + * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) + * + */ + __pyx_t_1 = (__pyx_v_have_step != 0); + if (__pyx_t_1) { + } else { + __pyx_t_2 = __pyx_t_1; + goto __pyx_L9_bool_binop_done; + } + __pyx_t_1 = ((__pyx_v_step == 0) != 0); + __pyx_t_2 = __pyx_t_1; + __pyx_L9_bool_binop_done:; + if (__pyx_t_2) { + + /* "View.MemoryView":838 + * + * if have_step and step == 0: + * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 838, __pyx_L1_error) + + /* "View.MemoryView":837 + * negative_step = have_step != 0 and step < 0 + * + * if have_step and step == 0: # <<<<<<<<<<<<<< + * _err_dim(ValueError, 
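+ /* The branches below are the C translation of Python's slice clamping, cf. slice.indices(): negative start/stop have shape added once, results are clamped into [0, shape] (or to shape-1 and -1 when stepping backwards), and a missing step defaults to 1; the resulting length is ceil((stop-start)/step), computed further down as (stop-start)//step plus one when a remainder exists. Rough Python equivalent: + * + * start, stop, step = index.indices(shape) # CPython helper with the same rules + */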
"Step may not be zero (axis %d)", dim) + * + */ + } + + /* "View.MemoryView":841 + * + * + * if have_start: # <<<<<<<<<<<<<< + * if start < 0: + * start += shape + */ + __pyx_t_2 = (__pyx_v_have_start != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":842 + * + * if have_start: + * if start < 0: # <<<<<<<<<<<<<< + * start += shape + * if start < 0: + */ + __pyx_t_2 = ((__pyx_v_start < 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":843 + * if have_start: + * if start < 0: + * start += shape # <<<<<<<<<<<<<< + * if start < 0: + * start = 0 + */ + __pyx_v_start = (__pyx_v_start + __pyx_v_shape); + + /* "View.MemoryView":844 + * if start < 0: + * start += shape + * if start < 0: # <<<<<<<<<<<<<< + * start = 0 + * elif start >= shape: + */ + __pyx_t_2 = ((__pyx_v_start < 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":845 + * start += shape + * if start < 0: + * start = 0 # <<<<<<<<<<<<<< + * elif start >= shape: + * if negative_step: + */ + __pyx_v_start = 0; + + /* "View.MemoryView":844 + * if start < 0: + * start += shape + * if start < 0: # <<<<<<<<<<<<<< + * start = 0 + * elif start >= shape: + */ + } + + /* "View.MemoryView":842 + * + * if have_start: + * if start < 0: # <<<<<<<<<<<<<< + * start += shape + * if start < 0: + */ + goto __pyx_L12; + } + + /* "View.MemoryView":846 + * if start < 0: + * start = 0 + * elif start >= shape: # <<<<<<<<<<<<<< + * if negative_step: + * start = shape - 1 + */ + __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":847 + * start = 0 + * elif start >= shape: + * if negative_step: # <<<<<<<<<<<<<< + * start = shape - 1 + * else: + */ + __pyx_t_2 = (__pyx_v_negative_step != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":848 + * elif start >= shape: + * if negative_step: + * start = shape - 1 # <<<<<<<<<<<<<< + * else: + * start = shape + */ + __pyx_v_start = (__pyx_v_shape - 1); + + /* "View.MemoryView":847 + * start = 0 + * elif start >= shape: + * if negative_step: # <<<<<<<<<<<<<< + * start = shape - 1 + * else: + */ + goto __pyx_L14; + } + + /* "View.MemoryView":850 + * start = shape - 1 + * else: + * start = shape # <<<<<<<<<<<<<< + * else: + * if negative_step: + */ + /*else*/ { + __pyx_v_start = __pyx_v_shape; + } + __pyx_L14:; + + /* "View.MemoryView":846 + * if start < 0: + * start = 0 + * elif start >= shape: # <<<<<<<<<<<<<< + * if negative_step: + * start = shape - 1 + */ + } + __pyx_L12:; + + /* "View.MemoryView":841 + * + * + * if have_start: # <<<<<<<<<<<<<< + * if start < 0: + * start += shape + */ + goto __pyx_L11; + } + + /* "View.MemoryView":852 + * start = shape + * else: + * if negative_step: # <<<<<<<<<<<<<< + * start = shape - 1 + * else: + */ + /*else*/ { + __pyx_t_2 = (__pyx_v_negative_step != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":853 + * else: + * if negative_step: + * start = shape - 1 # <<<<<<<<<<<<<< + * else: + * start = 0 + */ + __pyx_v_start = (__pyx_v_shape - 1); + + /* "View.MemoryView":852 + * start = shape + * else: + * if negative_step: # <<<<<<<<<<<<<< + * start = shape - 1 + * else: + */ + goto __pyx_L15; + } + + /* "View.MemoryView":855 + * start = shape - 1 + * else: + * start = 0 # <<<<<<<<<<<<<< + * + * if have_stop: + */ + /*else*/ { + __pyx_v_start = 0; + } + __pyx_L15:; + } + __pyx_L11:; + + /* "View.MemoryView":857 + * start = 0 + * + * if have_stop: # <<<<<<<<<<<<<< + * if stop < 0: + * stop += shape + */ + __pyx_t_2 = (__pyx_v_have_stop != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":858 + * + * if have_stop: + * if stop < 0: # 
<<<<<<<<<<<<<< + * stop += shape + * if stop < 0: + */ + __pyx_t_2 = ((__pyx_v_stop < 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":859 + * if have_stop: + * if stop < 0: + * stop += shape # <<<<<<<<<<<<<< + * if stop < 0: + * stop = 0 + */ + __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); + + /* "View.MemoryView":860 + * if stop < 0: + * stop += shape + * if stop < 0: # <<<<<<<<<<<<<< + * stop = 0 + * elif stop > shape: + */ + __pyx_t_2 = ((__pyx_v_stop < 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":861 + * stop += shape + * if stop < 0: + * stop = 0 # <<<<<<<<<<<<<< + * elif stop > shape: + * stop = shape + */ + __pyx_v_stop = 0; + + /* "View.MemoryView":860 + * if stop < 0: + * stop += shape + * if stop < 0: # <<<<<<<<<<<<<< + * stop = 0 + * elif stop > shape: + */ + } + + /* "View.MemoryView":858 + * + * if have_stop: + * if stop < 0: # <<<<<<<<<<<<<< + * stop += shape + * if stop < 0: + */ + goto __pyx_L17; + } + + /* "View.MemoryView":862 + * if stop < 0: + * stop = 0 + * elif stop > shape: # <<<<<<<<<<<<<< + * stop = shape + * else: + */ + __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":863 + * stop = 0 + * elif stop > shape: + * stop = shape # <<<<<<<<<<<<<< + * else: + * if negative_step: + */ + __pyx_v_stop = __pyx_v_shape; + + /* "View.MemoryView":862 + * if stop < 0: + * stop = 0 + * elif stop > shape: # <<<<<<<<<<<<<< + * stop = shape + * else: + */ + } + __pyx_L17:; + + /* "View.MemoryView":857 + * start = 0 + * + * if have_stop: # <<<<<<<<<<<<<< + * if stop < 0: + * stop += shape + */ + goto __pyx_L16; + } + + /* "View.MemoryView":865 + * stop = shape + * else: + * if negative_step: # <<<<<<<<<<<<<< + * stop = -1 + * else: + */ + /*else*/ { + __pyx_t_2 = (__pyx_v_negative_step != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":866 + * else: + * if negative_step: + * stop = -1 # <<<<<<<<<<<<<< + * else: + * stop = shape + */ + __pyx_v_stop = -1L; + + /* "View.MemoryView":865 + * stop = shape + * else: + * if negative_step: # <<<<<<<<<<<<<< + * stop = -1 + * else: + */ + goto __pyx_L19; + } + + /* "View.MemoryView":868 + * stop = -1 + * else: + * stop = shape # <<<<<<<<<<<<<< + * + * if not have_step: + */ + /*else*/ { + __pyx_v_stop = __pyx_v_shape; + } + __pyx_L19:; + } + __pyx_L16:; + + /* "View.MemoryView":870 + * stop = shape + * + * if not have_step: # <<<<<<<<<<<<<< + * step = 1 + * + */ + __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":871 + * + * if not have_step: + * step = 1 # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_step = 1; + + /* "View.MemoryView":870 + * stop = shape + * + * if not have_step: # <<<<<<<<<<<<<< + * step = 1 + * + */ + } + + /* "View.MemoryView":875 + * + * with cython.cdivision(True): + * new_shape = (stop - start) // step # <<<<<<<<<<<<<< + * + * if (stop - start) - step * new_shape: + */ + __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); + + /* "View.MemoryView":877 + * new_shape = (stop - start) // step + * + * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< + * new_shape += 1 + * + */ + __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":878 + * + * if (stop - start) - step * new_shape: + * new_shape += 1 # <<<<<<<<<<<<<< + * + * if new_shape < 0: + */ + __pyx_v_new_shape = (__pyx_v_new_shape + 1); + + /* "View.MemoryView":877 + * new_shape = (stop - start) // step + * + * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< + * 
new_shape += 1 + * + */ + } + + /* "View.MemoryView":880 + * new_shape += 1 + * + * if new_shape < 0: # <<<<<<<<<<<<<< + * new_shape = 0 + * + */ + __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":881 + * + * if new_shape < 0: + * new_shape = 0 # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_new_shape = 0; + + /* "View.MemoryView":880 + * new_shape += 1 + * + * if new_shape < 0: # <<<<<<<<<<<<<< + * new_shape = 0 + * + */ + } + + /* "View.MemoryView":884 + * + * + * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< + * dst.shape[new_ndim] = new_shape + * dst.suboffsets[new_ndim] = suboffset + */ + (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); + + /* "View.MemoryView":885 + * + * dst.strides[new_ndim] = stride * step + * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< + * dst.suboffsets[new_ndim] = suboffset + * + */ + (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; + + /* "View.MemoryView":886 + * dst.strides[new_ndim] = stride * step + * dst.shape[new_ndim] = new_shape + * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< + * + * + */ + (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; + } + __pyx_L3:; + + /* "View.MemoryView":889 + * + * + * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< + * dst.data += start * stride + * else: + */ + __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":890 + * + * if suboffset_dim[0] < 0: + * dst.data += start * stride # <<<<<<<<<<<<<< + * else: + * dst.suboffsets[suboffset_dim[0]] += start * stride + */ + __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); + + /* "View.MemoryView":889 + * + * + * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< + * dst.data += start * stride + * else: + */ + goto __pyx_L23; + } + + /* "View.MemoryView":892 + * dst.data += start * stride + * else: + * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< + * + * if suboffset >= 0: + */ + /*else*/ { + __pyx_t_3 = (__pyx_v_suboffset_dim[0]); + (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); + } + __pyx_L23:; + + /* "View.MemoryView":894 + * dst.suboffsets[suboffset_dim[0]] += start * stride + * + * if suboffset >= 0: # <<<<<<<<<<<<<< + * if not is_slice: + * if new_ndim == 0: + */ + __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":895 + * + * if suboffset >= 0: + * if not is_slice: # <<<<<<<<<<<<<< + * if new_ndim == 0: + * dst.data = ( dst.data)[0] + suboffset + */ + __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":896 + * if suboffset >= 0: + * if not is_slice: + * if new_ndim == 0: # <<<<<<<<<<<<<< + * dst.data = ( dst.data)[0] + suboffset + * else: + */ + __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":897 + * if not is_slice: + * if new_ndim == 0: + * dst.data = ( dst.data)[0] + suboffset # <<<<<<<<<<<<<< + * else: + * _err_dim(IndexError, "All dimensions preceding dimension %d " + */ + __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); + + /* "View.MemoryView":896 + * if suboffset >= 0: + * if not is_slice: + * if new_ndim == 0: # <<<<<<<<<<<<<< + * dst.data = ( dst.data)[0] + suboffset + * else: + */ + goto __pyx_L26; + } + + /* "View.MemoryView":899 + * dst.data = ( dst.data)[0] + suboffset + * else: + * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< + * 
"must be indexed and not sliced", dim) + * else: + */ + /*else*/ { + + /* "View.MemoryView":900 + * else: + * _err_dim(IndexError, "All dimensions preceding dimension %d " + * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< + * else: + * suboffset_dim[0] = new_ndim + */ + __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 899, __pyx_L1_error) + } + __pyx_L26:; + + /* "View.MemoryView":895 + * + * if suboffset >= 0: + * if not is_slice: # <<<<<<<<<<<<<< + * if new_ndim == 0: + * dst.data = ( dst.data)[0] + suboffset + */ + goto __pyx_L25; + } + + /* "View.MemoryView":902 + * "must be indexed and not sliced", dim) + * else: + * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< + * + * return 0 + */ + /*else*/ { + (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; + } + __pyx_L25:; + + /* "View.MemoryView":894 + * dst.suboffsets[suboffset_dim[0]] += start * stride + * + * if suboffset >= 0: # <<<<<<<<<<<<<< + * if not is_slice: + * if new_ndim == 0: + */ + } + + /* "View.MemoryView":904 + * suboffset_dim[0] = new_ndim + * + * return 0 # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = 0; + goto __pyx_L0; + + /* "View.MemoryView":807 + * + * @cname('__pyx_memoryview_slice_memviewslice') + * cdef int slice_memviewslice( # <<<<<<<<<<<<<< + * __Pyx_memviewslice *dst, + * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, + */ + + /* function exit code */ + __pyx_L1_error:; + { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + } + __pyx_r = -1; + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":910 + * + * @cname('__pyx_pybuffer_index') + * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< + * Py_ssize_t dim) except NULL: + * cdef Py_ssize_t shape, stride, suboffset = -1 + */ + +static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { + Py_ssize_t __pyx_v_shape; + Py_ssize_t __pyx_v_stride; + Py_ssize_t __pyx_v_suboffset; + Py_ssize_t __pyx_v_itemsize; + char *__pyx_v_resultp; + char *__pyx_r; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("pybuffer_index", 0); + + /* "View.MemoryView":912 + * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, + * Py_ssize_t dim) except NULL: + * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< + * cdef Py_ssize_t itemsize = view.itemsize + * cdef char *resultp + */ + __pyx_v_suboffset = -1L; + + /* "View.MemoryView":913 + * Py_ssize_t dim) except NULL: + * cdef Py_ssize_t shape, stride, suboffset = -1 + * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< + * cdef char *resultp + * + */ + __pyx_t_1 = __pyx_v_view->itemsize; + __pyx_v_itemsize = __pyx_t_1; + + /* "View.MemoryView":916 + * cdef char *resultp + * + * if view.ndim == 0: # <<<<<<<<<<<<<< + * shape = view.len / itemsize + * stride = itemsize + */ + __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":917 + * + * if view.ndim == 
0: + * shape = view.len / itemsize # <<<<<<<<<<<<<< + * stride = itemsize + * else: + */ + if (unlikely(__pyx_v_itemsize == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); + __PYX_ERR(1, 917, __pyx_L1_error) + } + else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { + PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); + __PYX_ERR(1, 917, __pyx_L1_error) + } + __pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize); + + /* "View.MemoryView":918 + * if view.ndim == 0: + * shape = view.len / itemsize + * stride = itemsize # <<<<<<<<<<<<<< + * else: + * shape = view.shape[dim] + */ + __pyx_v_stride = __pyx_v_itemsize; + + /* "View.MemoryView":916 + * cdef char *resultp + * + * if view.ndim == 0: # <<<<<<<<<<<<<< + * shape = view.len / itemsize + * stride = itemsize + */ + goto __pyx_L3; + } + + /* "View.MemoryView":920 + * stride = itemsize + * else: + * shape = view.shape[dim] # <<<<<<<<<<<<<< + * stride = view.strides[dim] + * if view.suboffsets != NULL: + */ + /*else*/ { + __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); + + /* "View.MemoryView":921 + * else: + * shape = view.shape[dim] + * stride = view.strides[dim] # <<<<<<<<<<<<<< + * if view.suboffsets != NULL: + * suboffset = view.suboffsets[dim] + */ + __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); + + /* "View.MemoryView":922 + * shape = view.shape[dim] + * stride = view.strides[dim] + * if view.suboffsets != NULL: # <<<<<<<<<<<<<< + * suboffset = view.suboffsets[dim] + * + */ + __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":923 + * stride = view.strides[dim] + * if view.suboffsets != NULL: + * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< + * + * if index < 0: + */ + __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); + + /* "View.MemoryView":922 + * shape = view.shape[dim] + * stride = view.strides[dim] + * if view.suboffsets != NULL: # <<<<<<<<<<<<<< + * suboffset = view.suboffsets[dim] + * + */ + } + } + __pyx_L3:; + + /* "View.MemoryView":925 + * suboffset = view.suboffsets[dim] + * + * if index < 0: # <<<<<<<<<<<<<< + * index += view.shape[dim] + * if index < 0: + */ + __pyx_t_2 = ((__pyx_v_index < 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":926 + * + * if index < 0: + * index += view.shape[dim] # <<<<<<<<<<<<<< + * if index < 0: + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) + */ + __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); + + /* "View.MemoryView":927 + * if index < 0: + * index += view.shape[dim] + * if index < 0: # <<<<<<<<<<<<<< + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) + * + */ + __pyx_t_2 = ((__pyx_v_index < 0) != 0); + if (unlikely(__pyx_t_2)) { + + /* "View.MemoryView":928 + * index += view.shape[dim] + * if index < 0: + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< + * + * if index >= shape: + */ + __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 928, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = 
__Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 928, __pyx_L1_error) + + /* "View.MemoryView":927 + * if index < 0: + * index += view.shape[dim] + * if index < 0: # <<<<<<<<<<<<<< + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) + * + */ + } + + /* "View.MemoryView":925 + * suboffset = view.suboffsets[dim] + * + * if index < 0: # <<<<<<<<<<<<<< + * index += view.shape[dim] + * if index < 0: + */ + } + + /* "View.MemoryView":930 + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) + * + * if index >= shape: # <<<<<<<<<<<<<< + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) + * + */ + __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); + if (unlikely(__pyx_t_2)) { + + /* "View.MemoryView":931 + * + * if index >= shape: + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< + * + * resultp = bufp + index * stride + */ + __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 931, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 931, __pyx_L1_error) + + /* "View.MemoryView":930 + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) + * + * if index >= shape: # <<<<<<<<<<<<<< + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) + * + */ + } + + /* "View.MemoryView":933 + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) + * + * resultp = bufp + index * stride # <<<<<<<<<<<<<< + * if suboffset >= 0: + * resultp = ( resultp)[0] + suboffset + */ + __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); + + /* "View.MemoryView":934 + * + * resultp = bufp + index * stride + * if suboffset >= 0: # <<<<<<<<<<<<<< + * resultp = ( resultp)[0] + suboffset + * + */ + __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":935 + * resultp = bufp + index * stride + * if suboffset >= 0: + * resultp = ( resultp)[0] + suboffset # <<<<<<<<<<<<<< + * + * return resultp + */ + __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); + + /* "View.MemoryView":934 + * + * resultp = bufp + index * stride + * if suboffset >= 0: # <<<<<<<<<<<<<< + * resultp = ( resultp)[0] + suboffset + * + */ + } + + /* "View.MemoryView":937 + * resultp = ( resultp)[0] + suboffset + * + * return resultp # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = __pyx_v_resultp; + goto __pyx_L0; + + /* "View.MemoryView":910 + * + * @cname('__pyx_pybuffer_index') + * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< + * Py_ssize_t dim) except NULL: + * cdef Py_ssize_t shape, stride, suboffset = -1 + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + 
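+ /* Illustrative sketch, not part of the generated module: after pybuffer_index
+  * wraps a negative index once by the axis length and bounds-checks it, the
+  * address arithmetic above reduces to bufp + index * stride. For a plain
+  * contiguous 1-D buffer (hypothetical names, stride == itemsize) the same
+  * logic is just:
+  *
+  *     static char *index_1d(char *buf, Py_ssize_t n, Py_ssize_t itemsize,
+  *                           Py_ssize_t i) {
+  *         if (i < 0) i += n;                 // wrap a negative index once
+  *         if (i < 0 || i >= n) return NULL;  // out of bounds on this axis
+  *         return buf + i * itemsize;         // element address
+  *     }
+  */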
__Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":943 + * + * @cname('__pyx_memslice_transpose') + * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< + * cdef int ndim = memslice.memview.view.ndim + * + */ + +static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { + int __pyx_v_ndim; + Py_ssize_t *__pyx_v_shape; + Py_ssize_t *__pyx_v_strides; + int __pyx_v_i; + int __pyx_v_j; + int __pyx_r; + int __pyx_t_1; + Py_ssize_t *__pyx_t_2; + long __pyx_t_3; + long __pyx_t_4; + Py_ssize_t __pyx_t_5; + Py_ssize_t __pyx_t_6; + int __pyx_t_7; + int __pyx_t_8; + int __pyx_t_9; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + + /* "View.MemoryView":944 + * @cname('__pyx_memslice_transpose') + * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: + * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< + * + * cdef Py_ssize_t *shape = memslice.shape + */ + __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; + __pyx_v_ndim = __pyx_t_1; + + /* "View.MemoryView":946 + * cdef int ndim = memslice.memview.view.ndim + * + * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< + * cdef Py_ssize_t *strides = memslice.strides + * + */ + __pyx_t_2 = __pyx_v_memslice->shape; + __pyx_v_shape = __pyx_t_2; + + /* "View.MemoryView":947 + * + * cdef Py_ssize_t *shape = memslice.shape + * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_2 = __pyx_v_memslice->strides; + __pyx_v_strides = __pyx_t_2; + + /* "View.MemoryView":951 + * + * cdef int i, j + * for i in range(ndim / 2): # <<<<<<<<<<<<<< + * j = ndim - 1 - i + * strides[i], strides[j] = strides[j], strides[i] + */ + __pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2); + __pyx_t_4 = __pyx_t_3; + for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) { + __pyx_v_i = __pyx_t_1; + + /* "View.MemoryView":952 + * cdef int i, j + * for i in range(ndim / 2): + * j = ndim - 1 - i # <<<<<<<<<<<<<< + * strides[i], strides[j] = strides[j], strides[i] + * shape[i], shape[j] = shape[j], shape[i] + */ + __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); + + /* "View.MemoryView":953 + * for i in range(ndim / 2): + * j = ndim - 1 - i + * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< + * shape[i], shape[j] = shape[j], shape[i] + * + */ + __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]); + __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]); + (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5; + (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6; + + /* "View.MemoryView":954 + * j = ndim - 1 - i + * strides[i], strides[j] = strides[j], strides[i] + * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< + * + * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: + */ + __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]); + __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]); + (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6; + (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5; + + /* "View.MemoryView":956 + * shape[i], shape[j] = shape[j], shape[i] + * + * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< + * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") + * + */ + __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); + if (!__pyx_t_8) { + } else { + __pyx_t_7 = __pyx_t_8; + goto __pyx_L6_bool_binop_done; + } + __pyx_t_8 = 
(((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); + __pyx_t_7 = __pyx_t_8; + __pyx_L6_bool_binop_done:; + if (__pyx_t_7) { + + /* "View.MemoryView":957 + * + * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: + * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< + * + * return 1 + */ + __pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 957, __pyx_L1_error) + + /* "View.MemoryView":956 + * shape[i], shape[j] = shape[j], shape[i] + * + * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< + * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") + * + */ + } + } + + /* "View.MemoryView":959 + * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") + * + * return 1 # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = 1; + goto __pyx_L0; + + /* "View.MemoryView":943 + * + * @cname('__pyx_memslice_transpose') + * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< + * cdef int ndim = memslice.memview.view.ndim + * + */ + + /* function exit code */ + __pyx_L1_error:; + { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + } + __pyx_r = 0; + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":976 + * cdef int (*to_dtype_func)(char *, object) except 0 + * + * def __dealloc__(self): # <<<<<<<<<<<<<< + * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) + * + */ + +/* Python wrapper */ +static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ +static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); + __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__dealloc__", 0); + + /* "View.MemoryView":977 + * + * def __dealloc__(self): + * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< + * + * cdef convert_item_to_object(self, char *itemp): + */ + __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); + + /* "View.MemoryView":976 + * cdef int (*to_dtype_func)(char *, object) except 0 + * + * def __dealloc__(self): # <<<<<<<<<<<<<< + * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) + * + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "View.MemoryView":979 + * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) + * + * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< + * if self.to_object_func != NULL: + * return self.to_object_func(itemp) + */ + +static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + 
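+ /* Illustrative sketch, not part of the generated module: convert_item_to_object
+  * below is a plain function-pointer dispatch with a base-class fallback — use
+  * the dtype-specific converter when the typed slice carries one, otherwise
+  * defer to the generic memoryview path. Hypothetical names, same shape as:
+  *
+  *     typedef PyObject *(*to_object_fn)(char *);
+  *
+  *     static PyObject *convert_item(to_object_fn fn, char *itemp) {
+  *         if (fn != NULL)
+  *             return fn(itemp);            // dtype-specific converter
+  *         return fallback_convert(itemp);  // hypothetical generic base path
+  *     }
+  */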
__Pyx_RefNannySetupContext("convert_item_to_object", 0); + + /* "View.MemoryView":980 + * + * cdef convert_item_to_object(self, char *itemp): + * if self.to_object_func != NULL: # <<<<<<<<<<<<<< + * return self.to_object_func(itemp) + * else: + */ + __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":981 + * cdef convert_item_to_object(self, char *itemp): + * if self.to_object_func != NULL: + * return self.to_object_func(itemp) # <<<<<<<<<<<<<< + * else: + * return memoryview.convert_item_to_object(self, itemp) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 981, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":980 + * + * cdef convert_item_to_object(self, char *itemp): + * if self.to_object_func != NULL: # <<<<<<<<<<<<<< + * return self.to_object_func(itemp) + * else: + */ + } + + /* "View.MemoryView":983 + * return self.to_object_func(itemp) + * else: + * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< + * + * cdef assign_item_from_object(self, char *itemp, object value): + */ + /*else*/ { + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 983, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + } + + /* "View.MemoryView":979 + * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) + * + * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< + * if self.to_object_func != NULL: + * return self.to_object_func(itemp) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":985 + * return memoryview.convert_item_to_object(self, itemp) + * + * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< + * if self.to_dtype_func != NULL: + * self.to_dtype_func(itemp, value) + */ + +static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("assign_item_from_object", 0); + + /* "View.MemoryView":986 + * + * cdef assign_item_from_object(self, char *itemp, object value): + * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< + * self.to_dtype_func(itemp, value) + * else: + */ + __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":987 + * cdef assign_item_from_object(self, char *itemp, object value): + * if self.to_dtype_func != NULL: + * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< + * else: + * memoryview.assign_item_from_object(self, itemp, value) + */ + __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 987, __pyx_L1_error) + + /* "View.MemoryView":986 + * + * cdef assign_item_from_object(self, char *itemp, object value): + * if 
self.to_dtype_func != NULL: # <<<<<<<<<<<<<< + * self.to_dtype_func(itemp, value) + * else: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":989 + * self.to_dtype_func(itemp, value) + * else: + * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< + * + * @property + */ + /*else*/ { + __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 989, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __pyx_L3:; + + /* "View.MemoryView":985 + * return memoryview.convert_item_to_object(self, itemp) + * + * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< + * if self.to_dtype_func != NULL: + * self.to_dtype_func(itemp, value) + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":992 + * + * @property + * def base(self): # <<<<<<<<<<<<<< + * return self.from_object + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":993 + * @property + * def base(self): + * return self.from_object # <<<<<<<<<<<<<< + * + * __pyx_getbuffer = capsule( &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_self->from_object); + __pyx_r = __pyx_v_self->from_object; + goto __pyx_L0; + + /* "View.MemoryView":992 + * + * @property + * def base(self): # <<<<<<<<<<<<<< + * return self.from_object + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject 
*__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__reduce_cython__", 0); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 2, __pyx_L1_error) + + /* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ +static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setstate_cython__", 0); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 4, __pyx_L1_error) + + /* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + + /* function exit code 
*/ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":999 + * + * @cname('__pyx_memoryview_fromslice') + * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< + * int ndim, + * object (*to_object_func)(char *), + */ + +static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { + struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; + Py_ssize_t __pyx_v_suboffset; + PyObject *__pyx_v_length = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + __Pyx_TypeInfo *__pyx_t_4; + Py_buffer __pyx_t_5; + Py_ssize_t *__pyx_t_6; + Py_ssize_t *__pyx_t_7; + Py_ssize_t *__pyx_t_8; + Py_ssize_t __pyx_t_9; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("memoryview_fromslice", 0); + + /* "View.MemoryView":1007 + * cdef _memoryviewslice result + * + * if memviewslice.memview == Py_None: # <<<<<<<<<<<<<< + * return None + * + */ + __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1008 + * + * if memviewslice.memview == Py_None: + * return None # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + + /* "View.MemoryView":1007 + * cdef _memoryviewslice result + * + * if memviewslice.memview == Py_None: # <<<<<<<<<<<<<< + * return None + * + */ + } + + /* "View.MemoryView":1013 + * + * + * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< + * + * result.from_slice = memviewslice + */ + __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1013, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(Py_None); + PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); + __Pyx_INCREF(__pyx_int_0); + __Pyx_GIVEREF(__pyx_int_0); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); + __Pyx_GIVEREF(__pyx_t_2); + PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); + __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); + __pyx_t_2 = 0; + + /* "View.MemoryView":1015 + * result = _memoryviewslice(None, 0, dtype_is_object) + * + * result.from_slice = memviewslice # <<<<<<<<<<<<<< + * __PYX_INC_MEMVIEW(&memviewslice, 1) + * + */ + __pyx_v_result->from_slice = __pyx_v_memviewslice; + + /* "View.MemoryView":1016 + * + * result.from_slice = memviewslice + * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< + * + * result.from_object = ( memviewslice.memview).base + */ + __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); + + /* "View.MemoryView":1018 + * __PYX_INC_MEMVIEW(&memviewslice, 1) + * + * result.from_object = ( memviewslice.memview).base # <<<<<<<<<<<<<< + * result.typeinfo = 
memviewslice.memview.typeinfo + * + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1018, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GIVEREF(__pyx_t_2); + __Pyx_GOTREF(__pyx_v_result->from_object); + __Pyx_DECREF(__pyx_v_result->from_object); + __pyx_v_result->from_object = __pyx_t_2; + __pyx_t_2 = 0; + + /* "View.MemoryView":1019 + * + * result.from_object = ( memviewslice.memview).base + * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< + * + * result.view = memviewslice.memview.view + */ + __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; + __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; + + /* "View.MemoryView":1021 + * result.typeinfo = memviewslice.memview.typeinfo + * + * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< + * result.view.buf = memviewslice.data + * result.view.ndim = ndim + */ + __pyx_t_5 = __pyx_v_memviewslice.memview->view; + __pyx_v_result->__pyx_base.view = __pyx_t_5; + + /* "View.MemoryView":1022 + * + * result.view = memviewslice.memview.view + * result.view.buf = memviewslice.data # <<<<<<<<<<<<<< + * result.view.ndim = ndim + * (<__pyx_buffer *> &result.view).obj = Py_None + */ + __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); + + /* "View.MemoryView":1023 + * result.view = memviewslice.memview.view + * result.view.buf = memviewslice.data + * result.view.ndim = ndim # <<<<<<<<<<<<<< + * (<__pyx_buffer *> &result.view).obj = Py_None + * Py_INCREF(Py_None) + */ + __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; + + /* "View.MemoryView":1024 + * result.view.buf = memviewslice.data + * result.view.ndim = ndim + * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< + * Py_INCREF(Py_None) + * + */ + ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; + + /* "View.MemoryView":1025 + * result.view.ndim = ndim + * (<__pyx_buffer *> &result.view).obj = Py_None + * Py_INCREF(Py_None) # <<<<<<<<<<<<<< + * + * if (memviewslice.memview).flags & PyBUF_WRITABLE: + */ + Py_INCREF(Py_None); + + /* "View.MemoryView":1027 + * Py_INCREF(Py_None) + * + * if (memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< + * result.flags = PyBUF_RECORDS + * else: + */ + __pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1028 + * + * if (memviewslice.memview).flags & PyBUF_WRITABLE: + * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< + * else: + * result.flags = PyBUF_RECORDS_RO + */ + __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; + + /* "View.MemoryView":1027 + * Py_INCREF(Py_None) + * + * if (memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< + * result.flags = PyBUF_RECORDS + * else: + */ + goto __pyx_L4; + } + + /* "View.MemoryView":1030 + * result.flags = PyBUF_RECORDS + * else: + * result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<< + * + * result.view.shape = result.from_slice.shape + */ + /*else*/ { + __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO; + } + __pyx_L4:; + + /* "View.MemoryView":1032 + * result.flags = PyBUF_RECORDS_RO + * + * result.view.shape = result.from_slice.shape # <<<<<<<<<<<<<< + * result.view.strides = result.from_slice.strides + * + */ + __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); + + /* "View.MemoryView":1033 + * + * result.view.shape = result.from_slice.shape + * result.view.strides = 
result.from_slice.strides # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); + + /* "View.MemoryView":1036 + * + * + * result.view.suboffsets = NULL # <<<<<<<<<<<<<< + * for suboffset in result.from_slice.suboffsets[:ndim]: + * if suboffset >= 0: + */ + __pyx_v_result->__pyx_base.view.suboffsets = NULL; + + /* "View.MemoryView":1037 + * + * result.view.suboffsets = NULL + * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< + * if suboffset >= 0: + * result.view.suboffsets = result.from_slice.suboffsets + */ + __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); + for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { + __pyx_t_6 = __pyx_t_8; + __pyx_v_suboffset = (__pyx_t_6[0]); + + /* "View.MemoryView":1038 + * result.view.suboffsets = NULL + * for suboffset in result.from_slice.suboffsets[:ndim]: + * if suboffset >= 0: # <<<<<<<<<<<<<< + * result.view.suboffsets = result.from_slice.suboffsets + * break + */ + __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1039 + * for suboffset in result.from_slice.suboffsets[:ndim]: + * if suboffset >= 0: + * result.view.suboffsets = result.from_slice.suboffsets # <<<<<<<<<<<<<< + * break + * + */ + __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); + + /* "View.MemoryView":1040 + * if suboffset >= 0: + * result.view.suboffsets = result.from_slice.suboffsets + * break # <<<<<<<<<<<<<< + * + * result.view.len = result.view.itemsize + */ + goto __pyx_L6_break; + + /* "View.MemoryView":1038 + * result.view.suboffsets = NULL + * for suboffset in result.from_slice.suboffsets[:ndim]: + * if suboffset >= 0: # <<<<<<<<<<<<<< + * result.view.suboffsets = result.from_slice.suboffsets + * break + */ + } + } + __pyx_L6_break:; + + /* "View.MemoryView":1042 + * break + * + * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< + * for length in result.view.shape[:ndim]: + * result.view.len *= length + */ + __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; + __pyx_v_result->__pyx_base.view.len = __pyx_t_9; + + /* "View.MemoryView":1043 + * + * result.view.len = result.view.itemsize + * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< + * result.view.len *= length + * + */ + __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); + for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { + __pyx_t_6 = __pyx_t_8; + __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1043, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); + __pyx_t_2 = 0; + + /* "View.MemoryView":1044 + * result.view.len = result.view.itemsize + * for length in result.view.shape[:ndim]: + * result.view.len *= length # <<<<<<<<<<<<<< + * + * result.to_object_func = to_object_func + */ + __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1044, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1044, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1044, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + 
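+ /* Illustrative sketch, not part of the generated module: this loop leaves
+  * view.len = itemsize * shape[0] * ... * shape[ndim-1], i.e. the total byte
+  * size of the exposed buffer. Stripped of the boxing through PyObject, the
+  * computation is equivalent to (hypothetical names):
+  *
+  *     static Py_ssize_t view_len(Py_ssize_t itemsize,
+  *                                const Py_ssize_t *shape, int ndim) {
+  *         Py_ssize_t len = itemsize;
+  *         for (int i = 0; i < ndim; i++)
+  *             len *= shape[i];   // multiply in each axis extent
+  *         return len;
+  *     }
+  */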
__pyx_v_result->__pyx_base.view.len = __pyx_t_9; + } + + /* "View.MemoryView":1046 + * result.view.len *= length + * + * result.to_object_func = to_object_func # <<<<<<<<<<<<<< + * result.to_dtype_func = to_dtype_func + * + */ + __pyx_v_result->to_object_func = __pyx_v_to_object_func; + + /* "View.MemoryView":1047 + * + * result.to_object_func = to_object_func + * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< + * + * return result + */ + __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; + + /* "View.MemoryView":1049 + * result.to_dtype_func = to_dtype_func + * + * return result # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_get_slice_from_memoryview') + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_result)); + __pyx_r = ((PyObject *)__pyx_v_result); + goto __pyx_L0; + + /* "View.MemoryView":999 + * + * @cname('__pyx_memoryview_fromslice') + * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< + * int ndim, + * object (*to_object_func)(char *), + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_result); + __Pyx_XDECREF(__pyx_v_length); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":1052 + * + * @cname('__pyx_memoryview_get_slice_from_memoryview') + * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *mslice) except NULL: + * cdef _memoryviewslice obj + */ + +static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { + struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; + __Pyx_memviewslice *__pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("get_slice_from_memview", 0); + + /* "View.MemoryView":1055 + * __Pyx_memviewslice *mslice) except NULL: + * cdef _memoryviewslice obj + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * obj = memview + * return &obj.from_slice + */ + __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1056 + * cdef _memoryviewslice obj + * if isinstance(memview, _memoryviewslice): + * obj = memview # <<<<<<<<<<<<<< + * return &obj.from_slice + * else: + */ + if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1056, __pyx_L1_error) + __pyx_t_3 = ((PyObject *)__pyx_v_memview); + __Pyx_INCREF(__pyx_t_3); + __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); + __pyx_t_3 = 0; + + /* "View.MemoryView":1057 + * if isinstance(memview, _memoryviewslice): + * obj = memview + * return &obj.from_slice # <<<<<<<<<<<<<< + * else: + * slice_copy(memview, mslice) + */ + __pyx_r = (&__pyx_v_obj->from_slice); + goto __pyx_L0; + + /* "View.MemoryView":1055 + * __Pyx_memviewslice *mslice) except NULL: + * cdef _memoryviewslice obj + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * obj = memview + * return &obj.from_slice + */ + } + + /* "View.MemoryView":1059 + * return 
&obj.from_slice + * else: + * slice_copy(memview, mslice) # <<<<<<<<<<<<<< + * return mslice + * + */ + /*else*/ { + __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); + + /* "View.MemoryView":1060 + * else: + * slice_copy(memview, mslice) + * return mslice # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_slice_copy') + */ + __pyx_r = __pyx_v_mslice; + goto __pyx_L0; + } + + /* "View.MemoryView":1052 + * + * @cname('__pyx_memoryview_get_slice_from_memoryview') + * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *mslice) except NULL: + * cdef _memoryviewslice obj + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_obj); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":1063 + * + * @cname('__pyx_memoryview_slice_copy') + * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< + * cdef int dim + * cdef (Py_ssize_t*) shape, strides, suboffsets + */ + +static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { + int __pyx_v_dim; + Py_ssize_t *__pyx_v_shape; + Py_ssize_t *__pyx_v_strides; + Py_ssize_t *__pyx_v_suboffsets; + __Pyx_RefNannyDeclarations + Py_ssize_t *__pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + Py_ssize_t __pyx_t_5; + __Pyx_RefNannySetupContext("slice_copy", 0); + + /* "View.MemoryView":1067 + * cdef (Py_ssize_t*) shape, strides, suboffsets + * + * shape = memview.view.shape # <<<<<<<<<<<<<< + * strides = memview.view.strides + * suboffsets = memview.view.suboffsets + */ + __pyx_t_1 = __pyx_v_memview->view.shape; + __pyx_v_shape = __pyx_t_1; + + /* "View.MemoryView":1068 + * + * shape = memview.view.shape + * strides = memview.view.strides # <<<<<<<<<<<<<< + * suboffsets = memview.view.suboffsets + * + */ + __pyx_t_1 = __pyx_v_memview->view.strides; + __pyx_v_strides = __pyx_t_1; + + /* "View.MemoryView":1069 + * shape = memview.view.shape + * strides = memview.view.strides + * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< + * + * dst.memview = <__pyx_memoryview *> memview + */ + __pyx_t_1 = __pyx_v_memview->view.suboffsets; + __pyx_v_suboffsets = __pyx_t_1; + + /* "View.MemoryView":1071 + * suboffsets = memview.view.suboffsets + * + * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< + * dst.data = memview.view.buf + * + */ + __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); + + /* "View.MemoryView":1072 + * + * dst.memview = <__pyx_memoryview *> memview + * dst.data = memview.view.buf # <<<<<<<<<<<<<< + * + * for dim in range(memview.view.ndim): + */ + __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); + + /* "View.MemoryView":1074 + * dst.data = memview.view.buf + * + * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< + * dst.shape[dim] = shape[dim] + * dst.strides[dim] = strides[dim] + */ + __pyx_t_2 = __pyx_v_memview->view.ndim; + __pyx_t_3 = __pyx_t_2; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_v_dim = __pyx_t_4; + + /* "View.MemoryView":1075 + * + * for dim in range(memview.view.ndim): + * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< + * dst.strides[dim] = strides[dim] + * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 + */ + (__pyx_v_dst->shape[__pyx_v_dim]) = 
(__pyx_v_shape[__pyx_v_dim]); + + /* "View.MemoryView":1076 + * for dim in range(memview.view.ndim): + * dst.shape[dim] = shape[dim] + * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< + * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 + * + */ + (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); + + /* "View.MemoryView":1077 + * dst.shape[dim] = shape[dim] + * dst.strides[dim] = strides[dim] + * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_copy_object') + */ + if ((__pyx_v_suboffsets != 0)) { + __pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]); + } else { + __pyx_t_5 = -1L; + } + (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5; + } + + /* "View.MemoryView":1063 + * + * @cname('__pyx_memoryview_slice_copy') + * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< + * cdef int dim + * cdef (Py_ssize_t*) shape, strides, suboffsets + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "View.MemoryView":1080 + * + * @cname('__pyx_memoryview_copy_object') + * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< + * "Create a new memoryview object" + * cdef __Pyx_memviewslice memviewslice + */ + +static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { + __Pyx_memviewslice __pyx_v_memviewslice; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("memoryview_copy", 0); + + /* "View.MemoryView":1083 + * "Create a new memoryview object" + * cdef __Pyx_memviewslice memviewslice + * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< + * return memoryview_copy_from_slice(memview, &memviewslice) + * + */ + __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); + + /* "View.MemoryView":1084 + * cdef __Pyx_memviewslice memviewslice + * slice_copy(memview, &memviewslice) + * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_copy_object_from_slice') + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1084, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "View.MemoryView":1080 + * + * @cname('__pyx_memoryview_copy_object') + * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< + * "Create a new memoryview object" + * cdef __Pyx_memviewslice memviewslice + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":1087 + * + * @cname('__pyx_memoryview_copy_object_from_slice') + * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< + * """ + * Create a new memoryview object from a given memoryview object and slice. 
+ */ + +static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { + PyObject *(*__pyx_v_to_object_func)(char *); + int (*__pyx_v_to_dtype_func)(char *, PyObject *); + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *(*__pyx_t_3)(char *); + int (*__pyx_t_4)(char *, PyObject *); + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); + + /* "View.MemoryView":1094 + * cdef int (*to_dtype_func)(char *, object) except 0 + * + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * to_object_func = (<_memoryviewslice> memview).to_object_func + * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func + */ + __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1095 + * + * if isinstance(memview, _memoryviewslice): + * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< + * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func + * else: + */ + __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; + __pyx_v_to_object_func = __pyx_t_3; + + /* "View.MemoryView":1096 + * if isinstance(memview, _memoryviewslice): + * to_object_func = (<_memoryviewslice> memview).to_object_func + * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< + * else: + * to_object_func = NULL + */ + __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; + __pyx_v_to_dtype_func = __pyx_t_4; + + /* "View.MemoryView":1094 + * cdef int (*to_dtype_func)(char *, object) except 0 + * + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * to_object_func = (<_memoryviewslice> memview).to_object_func + * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func + */ + goto __pyx_L3; + } + + /* "View.MemoryView":1098 + * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func + * else: + * to_object_func = NULL # <<<<<<<<<<<<<< + * to_dtype_func = NULL + * + */ + /*else*/ { + __pyx_v_to_object_func = NULL; + + /* "View.MemoryView":1099 + * else: + * to_object_func = NULL + * to_dtype_func = NULL # <<<<<<<<<<<<<< + * + * return memoryview_fromslice(memviewslice[0], memview.view.ndim, + */ + __pyx_v_to_dtype_func = NULL; + } + __pyx_L3:; + + /* "View.MemoryView":1101 + * to_dtype_func = NULL + * + * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< + * to_object_func, to_dtype_func, + * memview.dtype_is_object) + */ + __Pyx_XDECREF(__pyx_r); + + /* "View.MemoryView":1103 + * return memoryview_fromslice(memviewslice[0], memview.view.ndim, + * to_object_func, to_dtype_func, + * memview.dtype_is_object) # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1101, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_r = __pyx_t_5; + __pyx_t_5 = 0; + goto __pyx_L0; + + /* "View.MemoryView":1087 + * + * @cname('__pyx_memoryview_copy_object_from_slice') + * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< + * """ + * Create a new memoryview object from a given memoryview 
object and slice. + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":1109 + * + * + * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< + * if arg < 0: + * return -arg + */ + +static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { + Py_ssize_t __pyx_r; + int __pyx_t_1; + + /* "View.MemoryView":1110 + * + * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: + * if arg < 0: # <<<<<<<<<<<<<< + * return -arg + * else: + */ + __pyx_t_1 = ((__pyx_v_arg < 0) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1111 + * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: + * if arg < 0: + * return -arg # <<<<<<<<<<<<<< + * else: + * return arg + */ + __pyx_r = (-__pyx_v_arg); + goto __pyx_L0; + + /* "View.MemoryView":1110 + * + * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: + * if arg < 0: # <<<<<<<<<<<<<< + * return -arg + * else: + */ + } + + /* "View.MemoryView":1113 + * return -arg + * else: + * return arg # <<<<<<<<<<<<<< + * + * @cname('__pyx_get_best_slice_order') + */ + /*else*/ { + __pyx_r = __pyx_v_arg; + goto __pyx_L0; + } + + /* "View.MemoryView":1109 + * + * + * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< + * if arg < 0: + * return -arg + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":1116 + * + * @cname('__pyx_get_best_slice_order') + * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< + * """ + * Figure out the best memory access order for a given slice. 
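+ *
+ * [reviewer annotation, not part of Cython's generated output] The two
+ * loops below pick the innermost significant stride as the C-order
+ * candidate and the outermost one as the Fortran-order candidate; the
+ * smaller magnitude wins. A minimal runnable Python sketch of the same
+ * heuristic (names are illustrative only):
+ *
+ *     def get_best_order(shape, strides):
+ *         c_stride = f_stride = 0
+ *         for i in reversed(range(len(shape))):  # C candidate: last dim
+ *             if shape[i] > 1:
+ *                 c_stride = strides[i]
+ *                 break
+ *         for i in range(len(shape)):            # F candidate: first dim
+ *             if shape[i] > 1:
+ *                 f_stride = strides[i]
+ *                 break
+ *         return 'C' if abs(c_stride) <= abs(f_stride) else 'F'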
+ */ + +static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { + int __pyx_v_i; + Py_ssize_t __pyx_v_c_stride; + Py_ssize_t __pyx_v_f_stride; + char __pyx_r; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + + /* "View.MemoryView":1121 + * """ + * cdef int i + * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< + * cdef Py_ssize_t f_stride = 0 + * + */ + __pyx_v_c_stride = 0; + + /* "View.MemoryView":1122 + * cdef int i + * cdef Py_ssize_t c_stride = 0 + * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< + * + * for i in range(ndim - 1, -1, -1): + */ + __pyx_v_f_stride = 0; + + /* "View.MemoryView":1124 + * cdef Py_ssize_t f_stride = 0 + * + * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< + * if mslice.shape[i] > 1: + * c_stride = mslice.strides[i] + */ + for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { + __pyx_v_i = __pyx_t_1; + + /* "View.MemoryView":1125 + * + * for i in range(ndim - 1, -1, -1): + * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< + * c_stride = mslice.strides[i] + * break + */ + __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1126 + * for i in range(ndim - 1, -1, -1): + * if mslice.shape[i] > 1: + * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< + * break + * + */ + __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); + + /* "View.MemoryView":1127 + * if mslice.shape[i] > 1: + * c_stride = mslice.strides[i] + * break # <<<<<<<<<<<<<< + * + * for i in range(ndim): + */ + goto __pyx_L4_break; + + /* "View.MemoryView":1125 + * + * for i in range(ndim - 1, -1, -1): + * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< + * c_stride = mslice.strides[i] + * break + */ + } + } + __pyx_L4_break:; + + /* "View.MemoryView":1129 + * break + * + * for i in range(ndim): # <<<<<<<<<<<<<< + * if mslice.shape[i] > 1: + * f_stride = mslice.strides[i] + */ + __pyx_t_1 = __pyx_v_ndim; + __pyx_t_3 = __pyx_t_1; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_v_i = __pyx_t_4; + + /* "View.MemoryView":1130 + * + * for i in range(ndim): + * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< + * f_stride = mslice.strides[i] + * break + */ + __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1131 + * for i in range(ndim): + * if mslice.shape[i] > 1: + * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< + * break + * + */ + __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); + + /* "View.MemoryView":1132 + * if mslice.shape[i] > 1: + * f_stride = mslice.strides[i] + * break # <<<<<<<<<<<<<< + * + * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): + */ + goto __pyx_L7_break; + + /* "View.MemoryView":1130 + * + * for i in range(ndim): + * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< + * f_stride = mslice.strides[i] + * break + */ + } + } + __pyx_L7_break:; + + /* "View.MemoryView":1134 + * break + * + * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< + * return 'C' + * else: + */ + __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1135 + * + * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): + * return 'C' # <<<<<<<<<<<<<< + * else: + * return 'F' + */ + __pyx_r = 'C'; + goto __pyx_L0; + + /* "View.MemoryView":1134 + * break + * + * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< + * return 'C' + * else: + */ + } + + /* "View.MemoryView":1137 + * return 'C' + * else: + 
* return 'F' # <<<<<<<<<<<<<< + * + * @cython.cdivision(True) + */ + /*else*/ { + __pyx_r = 'F'; + goto __pyx_L0; + } + + /* "View.MemoryView":1116 + * + * @cname('__pyx_get_best_slice_order') + * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< + * """ + * Figure out the best memory access order for a given slice. + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":1140 + * + * @cython.cdivision(True) + * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< + * char *dst_data, Py_ssize_t *dst_strides, + * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, + */ + +static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { + CYTHON_UNUSED Py_ssize_t __pyx_v_i; + CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; + Py_ssize_t __pyx_v_dst_extent; + Py_ssize_t __pyx_v_src_stride; + Py_ssize_t __pyx_v_dst_stride; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + Py_ssize_t __pyx_t_4; + Py_ssize_t __pyx_t_5; + Py_ssize_t __pyx_t_6; + + /* "View.MemoryView":1147 + * + * cdef Py_ssize_t i + * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< + * cdef Py_ssize_t dst_extent = dst_shape[0] + * cdef Py_ssize_t src_stride = src_strides[0] + */ + __pyx_v_src_extent = (__pyx_v_src_shape[0]); + + /* "View.MemoryView":1148 + * cdef Py_ssize_t i + * cdef Py_ssize_t src_extent = src_shape[0] + * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< + * cdef Py_ssize_t src_stride = src_strides[0] + * cdef Py_ssize_t dst_stride = dst_strides[0] + */ + __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); + + /* "View.MemoryView":1149 + * cdef Py_ssize_t src_extent = src_shape[0] + * cdef Py_ssize_t dst_extent = dst_shape[0] + * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< + * cdef Py_ssize_t dst_stride = dst_strides[0] + * + */ + __pyx_v_src_stride = (__pyx_v_src_strides[0]); + + /* "View.MemoryView":1150 + * cdef Py_ssize_t dst_extent = dst_shape[0] + * cdef Py_ssize_t src_stride = src_strides[0] + * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< + * + * if ndim == 1: + */ + __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); + + /* "View.MemoryView":1152 + * cdef Py_ssize_t dst_stride = dst_strides[0] + * + * if ndim == 1: # <<<<<<<<<<<<<< + * if (src_stride > 0 and dst_stride > 0 and + * src_stride == itemsize == dst_stride): + */ + __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1153 + * + * if ndim == 1: + * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< + * src_stride == itemsize == dst_stride): + * memcpy(dst_data, src_data, itemsize * dst_extent) + */ + __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L5_bool_binop_done; + } + __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L5_bool_binop_done; + } + + /* "View.MemoryView":1154 + * if ndim == 1: + * if (src_stride > 0 and dst_stride > 0 and + * src_stride == itemsize == dst_stride): # <<<<<<<<<<<<<< + * memcpy(dst_data, src_data, itemsize * dst_extent) + * else: + */ + __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); + if (__pyx_t_2) { + __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); + } + __pyx_t_3 = (__pyx_t_2 != 
0); + __pyx_t_1 = __pyx_t_3; + __pyx_L5_bool_binop_done:; + + /* "View.MemoryView":1153 + * + * if ndim == 1: + * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< + * src_stride == itemsize == dst_stride): + * memcpy(dst_data, src_data, itemsize * dst_extent) + */ + if (__pyx_t_1) { + + /* "View.MemoryView":1155 + * if (src_stride > 0 and dst_stride > 0 and + * src_stride == itemsize == dst_stride): + * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< + * else: + * for i in range(dst_extent): + */ + (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent))); + + /* "View.MemoryView":1153 + * + * if ndim == 1: + * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< + * src_stride == itemsize == dst_stride): + * memcpy(dst_data, src_data, itemsize * dst_extent) + */ + goto __pyx_L4; + } + + /* "View.MemoryView":1157 + * memcpy(dst_data, src_data, itemsize * dst_extent) + * else: + * for i in range(dst_extent): # <<<<<<<<<<<<<< + * memcpy(dst_data, src_data, itemsize) + * src_data += src_stride + */ + /*else*/ { + __pyx_t_4 = __pyx_v_dst_extent; + __pyx_t_5 = __pyx_t_4; + for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { + __pyx_v_i = __pyx_t_6; + + /* "View.MemoryView":1158 + * else: + * for i in range(dst_extent): + * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< + * src_data += src_stride + * dst_data += dst_stride + */ + (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize)); + + /* "View.MemoryView":1159 + * for i in range(dst_extent): + * memcpy(dst_data, src_data, itemsize) + * src_data += src_stride # <<<<<<<<<<<<<< + * dst_data += dst_stride + * else: + */ + __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); + + /* "View.MemoryView":1160 + * memcpy(dst_data, src_data, itemsize) + * src_data += src_stride + * dst_data += dst_stride # <<<<<<<<<<<<<< + * else: + * for i in range(dst_extent): + */ + __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); + } + } + __pyx_L4:; + + /* "View.MemoryView":1152 + * cdef Py_ssize_t dst_stride = dst_strides[0] + * + * if ndim == 1: # <<<<<<<<<<<<<< + * if (src_stride > 0 and dst_stride > 0 and + * src_stride == itemsize == dst_stride): + */ + goto __pyx_L3; + } + + /* "View.MemoryView":1162 + * dst_data += dst_stride + * else: + * for i in range(dst_extent): # <<<<<<<<<<<<<< + * _copy_strided_to_strided(src_data, src_strides + 1, + * dst_data, dst_strides + 1, + */ + /*else*/ { + __pyx_t_4 = __pyx_v_dst_extent; + __pyx_t_5 = __pyx_t_4; + for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { + __pyx_v_i = __pyx_t_6; + + /* "View.MemoryView":1163 + * else: + * for i in range(dst_extent): + * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< + * dst_data, dst_strides + 1, + * src_shape + 1, dst_shape + 1, + */ + _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); + + /* "View.MemoryView":1167 + * src_shape + 1, dst_shape + 1, + * ndim - 1, itemsize) + * src_data += src_stride # <<<<<<<<<<<<<< + * dst_data += dst_stride + * + */ + __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); + + /* "View.MemoryView":1168 + * ndim - 1, itemsize) + * src_data += src_stride + * dst_data += dst_stride # <<<<<<<<<<<<<< + * + * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, + */ + __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); + } + } + __pyx_L3:; + + 
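/* [reviewer annotation, not part of Cython's generated output]
+ * The recursion above walks the outermost dimension and recurses on the
+ * rest; the 1-D base case collapses to a single memcpy when both sides
+ * are contiguous. A hedged, runnable Python sketch over bytearrays
+ * (all names illustrative only):
+ *
+ *     def copy_strided(src, dst, src_strides, dst_strides, shape,
+ *                      itemsize, src_off=0, dst_off=0):
+ *         if len(shape) == 1:
+ *             if src_strides[0] == itemsize == dst_strides[0]:
+ *                 n = shape[0] * itemsize      # contiguous: one bulk copy
+ *                 dst[dst_off:dst_off + n] = src[src_off:src_off + n]
+ *             else:
+ *                 for _ in range(shape[0]):    # item-by-item copy
+ *                     dst[dst_off:dst_off + itemsize] = \
+ *                         src[src_off:src_off + itemsize]
+ *                     src_off += src_strides[0]
+ *                     dst_off += dst_strides[0]
+ *         else:
+ *             for _ in range(shape[0]):        # recurse on trailing dims
+ *                 copy_strided(src, dst, src_strides[1:], dst_strides[1:],
+ *                              shape[1:], itemsize, src_off, dst_off)
+ *                 src_off += src_strides[0]
+ *                 dst_off += dst_strides[0]
+ */
+
+ 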
/* "View.MemoryView":1140 + * + * @cython.cdivision(True) + * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< + * char *dst_data, Py_ssize_t *dst_strides, + * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, + */ + + /* function exit code */ +} + +/* "View.MemoryView":1170 + * dst_data += dst_stride + * + * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *dst, + * int ndim, size_t itemsize) nogil: + */ + +static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { + + /* "View.MemoryView":1173 + * __Pyx_memviewslice *dst, + * int ndim, size_t itemsize) nogil: + * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< + * src.shape, dst.shape, ndim, itemsize) + * + */ + _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); + + /* "View.MemoryView":1170 + * dst_data += dst_stride + * + * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *dst, + * int ndim, size_t itemsize) nogil: + */ + + /* function exit code */ +} + +/* "View.MemoryView":1177 + * + * @cname('__pyx_memoryview_slice_get_size') + * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< + * "Return the size of the memory occupied by the slice in number of bytes" + * cdef Py_ssize_t shape, size = src.memview.view.itemsize + */ + +static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { + Py_ssize_t __pyx_v_shape; + Py_ssize_t __pyx_v_size; + Py_ssize_t __pyx_r; + Py_ssize_t __pyx_t_1; + Py_ssize_t *__pyx_t_2; + Py_ssize_t *__pyx_t_3; + Py_ssize_t *__pyx_t_4; + + /* "View.MemoryView":1179 + * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: + * "Return the size of the memory occupied by the slice in number of bytes" + * cdef Py_ssize_t shape, size = src.memview.view.itemsize # <<<<<<<<<<<<<< + * + * for shape in src.shape[:ndim]: + */ + __pyx_t_1 = __pyx_v_src->memview->view.itemsize; + __pyx_v_size = __pyx_t_1; + + /* "View.MemoryView":1181 + * cdef Py_ssize_t shape, size = src.memview.view.itemsize + * + * for shape in src.shape[:ndim]: # <<<<<<<<<<<<<< + * size *= shape + * + */ + __pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim); + for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { + __pyx_t_2 = __pyx_t_4; + __pyx_v_shape = (__pyx_t_2[0]); + + /* "View.MemoryView":1182 + * + * for shape in src.shape[:ndim]: + * size *= shape # <<<<<<<<<<<<<< + * + * return size + */ + __pyx_v_size = (__pyx_v_size * __pyx_v_shape); + } + + /* "View.MemoryView":1184 + * size *= shape + * + * return size # <<<<<<<<<<<<<< + * + * @cname('__pyx_fill_contig_strides_array') + */ + __pyx_r = __pyx_v_size; + goto __pyx_L0; + + /* "View.MemoryView":1177 + * + * @cname('__pyx_memoryview_slice_get_size') + * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< + * "Return the size of the memory occupied by the slice in number of bytes" + * cdef Py_ssize_t shape, size = src.memview.view.itemsize + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":1187 + * + * @cname('__pyx_fill_contig_strides_array') + * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< + * 
Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, + * int ndim, char order) nogil: + */ + +static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { + int __pyx_v_idx; + Py_ssize_t __pyx_r; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + + /* "View.MemoryView":1196 + * cdef int idx + * + * if order == 'F': # <<<<<<<<<<<<<< + * for idx in range(ndim): + * strides[idx] = stride + */ + __pyx_t_1 = ((__pyx_v_order == 'F') != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1197 + * + * if order == 'F': + * for idx in range(ndim): # <<<<<<<<<<<<<< + * strides[idx] = stride + * stride *= shape[idx] + */ + __pyx_t_2 = __pyx_v_ndim; + __pyx_t_3 = __pyx_t_2; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_v_idx = __pyx_t_4; + + /* "View.MemoryView":1198 + * if order == 'F': + * for idx in range(ndim): + * strides[idx] = stride # <<<<<<<<<<<<<< + * stride *= shape[idx] + * else: + */ + (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; + + /* "View.MemoryView":1199 + * for idx in range(ndim): + * strides[idx] = stride + * stride *= shape[idx] # <<<<<<<<<<<<<< + * else: + * for idx in range(ndim - 1, -1, -1): + */ + __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); + } + + /* "View.MemoryView":1196 + * cdef int idx + * + * if order == 'F': # <<<<<<<<<<<<<< + * for idx in range(ndim): + * strides[idx] = stride + */ + goto __pyx_L3; + } + + /* "View.MemoryView":1201 + * stride *= shape[idx] + * else: + * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< + * strides[idx] = stride + * stride *= shape[idx] + */ + /*else*/ { + for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) { + __pyx_v_idx = __pyx_t_2; + + /* "View.MemoryView":1202 + * else: + * for idx in range(ndim - 1, -1, -1): + * strides[idx] = stride # <<<<<<<<<<<<<< + * stride *= shape[idx] + * + */ + (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; + + /* "View.MemoryView":1203 + * for idx in range(ndim - 1, -1, -1): + * strides[idx] = stride + * stride *= shape[idx] # <<<<<<<<<<<<<< + * + * return stride + */ + __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); + } + } + __pyx_L3:; + + /* "View.MemoryView":1205 + * stride *= shape[idx] + * + * return stride # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_copy_data_to_temp') + */ + __pyx_r = __pyx_v_stride; + goto __pyx_L0; + + /* "View.MemoryView":1187 + * + * @cname('__pyx_fill_contig_strides_array') + * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< + * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, + * int ndim, char order) nogil: + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":1208 + * + * @cname('__pyx_memoryview_copy_data_to_temp') + * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *tmpslice, + * char order, + */ + +static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { + int __pyx_v_i; + void *__pyx_v_result; + size_t __pyx_v_itemsize; + size_t __pyx_v_size; + void *__pyx_r; + Py_ssize_t __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + struct __pyx_memoryview_obj *__pyx_t_4; + int __pyx_t_5; + int __pyx_t_6; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + + /* "View.MemoryView":1219 + * cdef void *result + * + * cdef 
size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<<
+ * cdef size_t size = slice_get_size(src, ndim)
+ *
+ */
+ __pyx_t_1 = __pyx_v_src->memview->view.itemsize;
+ __pyx_v_itemsize = __pyx_t_1;
+
+ /* "View.MemoryView":1220
+ *
+ * cdef size_t itemsize = src.memview.view.itemsize
+ * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<<
+ *
+ * result = malloc(size)
+ */
+ __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim);
+
+ /* "View.MemoryView":1222
+ * cdef size_t size = slice_get_size(src, ndim)
+ *
+ * result = malloc(size) # <<<<<<<<<<<<<<
+ * if not result:
+ * _err(MemoryError, NULL)
+ */
+ __pyx_v_result = malloc(__pyx_v_size);
+
+ /* "View.MemoryView":1223
+ *
+ * result = malloc(size)
+ * if not result: # <<<<<<<<<<<<<<
+ * _err(MemoryError, NULL)
+ *
+ */
+ __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0);
+ if (__pyx_t_2) {
+
+ /* "View.MemoryView":1224
+ * result = malloc(size)
+ * if not result:
+ * _err(MemoryError, NULL) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1224, __pyx_L1_error)
+
+ /* "View.MemoryView":1223
+ *
+ * result = malloc(size)
+ * if not result: # <<<<<<<<<<<<<<
+ * _err(MemoryError, NULL)
+ *
+ */
+ }
+
+ /* "View.MemoryView":1227
+ *
+ *
+ * tmpslice.data = <char *> result # <<<<<<<<<<<<<<
+ * tmpslice.memview = src.memview
+ * for i in range(ndim):
+ */
+ __pyx_v_tmpslice->data = ((char *)__pyx_v_result);
+
+ /* "View.MemoryView":1228
+ *
+ * tmpslice.data = <char *> result
+ * tmpslice.memview = src.memview # <<<<<<<<<<<<<<
+ * for i in range(ndim):
+ * tmpslice.shape[i] = src.shape[i]
+ */
+ __pyx_t_4 = __pyx_v_src->memview;
+ __pyx_v_tmpslice->memview = __pyx_t_4;
+
+ /* "View.MemoryView":1229
+ * tmpslice.data = <char *> result
+ * tmpslice.memview = src.memview
+ * for i in range(ndim): # <<<<<<<<<<<<<<
+ * tmpslice.shape[i] = src.shape[i]
+ * tmpslice.suboffsets[i] = -1
+ */
+ __pyx_t_3 = __pyx_v_ndim;
+ __pyx_t_5 = __pyx_t_3;
+ for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
+ __pyx_v_i = __pyx_t_6;
+
+ /* "View.MemoryView":1230
+ * tmpslice.memview = src.memview
+ * for i in range(ndim):
+ * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<<
+ * tmpslice.suboffsets[i] = -1
+ *
+ */
+ (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]);
+
+ /* "View.MemoryView":1231
+ * for i in range(ndim):
+ * tmpslice.shape[i] = src.shape[i]
+ * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<<
+ *
+ * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize,
+ */
+ (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L;
+ }
+
+ /* "View.MemoryView":1233
+ * tmpslice.suboffsets[i] = -1
+ *
+ * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<<
+ * ndim, order)
+ *
+ */
+ (void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order));
+
+ /* "View.MemoryView":1237
+ *
+ *
+ * for i in range(ndim): # <<<<<<<<<<<<<<
+ * if tmpslice.shape[i] == 1:
+ * tmpslice.strides[i] = 0
+ */
+ __pyx_t_3 = __pyx_v_ndim;
+ __pyx_t_5 = __pyx_t_3;
+ for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
+ __pyx_v_i = __pyx_t_6;
+
+ /* "View.MemoryView":1238
+ *
+ * for i in range(ndim):
+ * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<<
+ * tmpslice.strides[i] = 0
+ *
+ */
+ __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0);
+ if (__pyx_t_2) {
+
+ /* "View.MemoryView":1239
+ * for i 
in range(ndim): + * if tmpslice.shape[i] == 1: + * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< + * + * if slice_is_contig(src[0], order, ndim): + */ + (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; + + /* "View.MemoryView":1238 + * + * for i in range(ndim): + * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< + * tmpslice.strides[i] = 0 + * + */ + } + } + + /* "View.MemoryView":1241 + * tmpslice.strides[i] = 0 + * + * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< + * memcpy(result, src.data, size) + * else: + */ + __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1242 + * + * if slice_is_contig(src[0], order, ndim): + * memcpy(result, src.data, size) # <<<<<<<<<<<<<< + * else: + * copy_strided_to_strided(src, tmpslice, ndim, itemsize) + */ + (void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size)); + + /* "View.MemoryView":1241 + * tmpslice.strides[i] = 0 + * + * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< + * memcpy(result, src.data, size) + * else: + */ + goto __pyx_L9; + } + + /* "View.MemoryView":1244 + * memcpy(result, src.data, size) + * else: + * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< + * + * return result + */ + /*else*/ { + copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); + } + __pyx_L9:; + + /* "View.MemoryView":1246 + * copy_strided_to_strided(src, tmpslice, ndim, itemsize) + * + * return result # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = __pyx_v_result; + goto __pyx_L0; + + /* "View.MemoryView":1208 + * + * @cname('__pyx_memoryview_copy_data_to_temp') + * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *tmpslice, + * char order, + */ + + /* function exit code */ + __pyx_L1_error:; + { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + } + __pyx_r = NULL; + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":1251 + * + * @cname('__pyx_memoryview_err_extents') + * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< + * Py_ssize_t extent2) except -1 with gil: + * raise ValueError("got differing extents in dimension %d (got %d and %d)" % + */ + +static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_RefNannySetupContext("_err_extents", 0); + + /* "View.MemoryView":1254 + * Py_ssize_t extent2) except -1 with gil: + * raise ValueError("got differing extents in dimension %d (got %d and %d)" % + * (i, extent1, extent2)) # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_err_dim') + */ + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1254, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(1, 1254, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1254, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_2); + PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); + __pyx_t_1 = 0; + __pyx_t_2 = 0; + __pyx_t_3 = 0; + + /* "View.MemoryView":1253 + * cdef int _err_extents(int i, Py_ssize_t extent1, + * Py_ssize_t extent2) except -1 with gil: + * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< + * (i, extent1, extent2)) + * + */ + __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1253, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1253, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 1253, __pyx_L1_error) + + /* "View.MemoryView":1251 + * + * @cname('__pyx_memoryview_err_extents') + * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< + * Py_ssize_t extent2) except -1 with gil: + * raise ValueError("got differing extents in dimension %d (got %d and %d)" % + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __Pyx_RefNannyFinishContext(); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + return __pyx_r; +} + +/* "View.MemoryView":1257 + * + * @cname('__pyx_memoryview_err_dim') + * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< + * raise error(msg.decode('ascii') % dim) + * + */ + +static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_RefNannySetupContext("_err_dim", 0); + __Pyx_INCREF(__pyx_v_error); + + /* "View.MemoryView":1258 + * @cname('__pyx_memoryview_err_dim') + * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: + * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_err') + */ + __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1258, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1258, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1258, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_INCREF(__pyx_v_error); + __pyx_t_3 
= __pyx_v_error; __pyx_t_2 = NULL; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { + __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); + if (likely(__pyx_t_2)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); + __Pyx_INCREF(__pyx_t_2); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_3, function); + } + } + __pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1258, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 1258, __pyx_L1_error) + + /* "View.MemoryView":1257 + * + * @cname('__pyx_memoryview_err_dim') + * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< + * raise error(msg.decode('ascii') % dim) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __Pyx_XDECREF(__pyx_v_error); + __Pyx_RefNannyFinishContext(); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + return __pyx_r; +} + +/* "View.MemoryView":1261 + * + * @cname('__pyx_memoryview_err') + * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< + * if msg != NULL: + * raise error(msg.decode('ascii')) + */ + +static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_RefNannySetupContext("_err", 0); + __Pyx_INCREF(__pyx_v_error); + + /* "View.MemoryView":1262 + * @cname('__pyx_memoryview_err') + * cdef int _err(object error, char *msg) except -1 with gil: + * if msg != NULL: # <<<<<<<<<<<<<< + * raise error(msg.decode('ascii')) + * else: + */ + __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":1263 + * cdef int _err(object error, char *msg) except -1 with gil: + * if msg != NULL: + * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< + * else: + * raise error + */ + __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1263, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(__pyx_v_error); + __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { + __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); + if (likely(__pyx_t_5)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); + __Pyx_INCREF(__pyx_t_5); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_4, function); + } + } + __pyx_t_2 = (__pyx_t_5) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1263, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(1, 1263, __pyx_L1_error) + + /* "View.MemoryView":1262 + * @cname('__pyx_memoryview_err') + * cdef int _err(object error, char *msg) except -1 with gil: + * if msg != NULL: # <<<<<<<<<<<<<< + * raise error(msg.decode('ascii')) + * else: + */ + } + + /* "View.MemoryView":1265 + * raise error(msg.decode('ascii')) + * else: + * raise error # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_copy_contents') + */ + /*else*/ { + __Pyx_Raise(__pyx_v_error, 0, 0, 0); + __PYX_ERR(1, 1265, __pyx_L1_error) + } + + /* "View.MemoryView":1261 + * + * @cname('__pyx_memoryview_err') + * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< + * if msg != NULL: + * raise error(msg.decode('ascii')) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __Pyx_XDECREF(__pyx_v_error); + __Pyx_RefNannyFinishContext(); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + return __pyx_r; +} + +/* "View.MemoryView":1268 + * + * @cname('__pyx_memoryview_copy_contents') + * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice dst, + * int src_ndim, int dst_ndim, + */ + +static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { + void *__pyx_v_tmpdata; + size_t __pyx_v_itemsize; + int __pyx_v_i; + char __pyx_v_order; + int __pyx_v_broadcasting; + int __pyx_v_direct_copy; + __Pyx_memviewslice __pyx_v_tmp; + int __pyx_v_ndim; + int __pyx_r; + Py_ssize_t __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + int __pyx_t_5; + int __pyx_t_6; + void *__pyx_t_7; + int __pyx_t_8; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + + /* "View.MemoryView":1276 + * Check for overlapping memory and verify the shapes. 
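+ *
+ * [reviewer annotation, not Cython output] Roughly, the steps below are:
+ * 1. broadcast leading dimensions so src and dst have equal ndim;
+ * 2. check extents dim by dim (a src extent of 1 broadcasts by zeroing
+ *    its stride, any other mismatch raises ValueError);
+ * 3. if src and dst memory overlap, stage src in a contiguous temp
+ *    buffer first (copy_data_to_temp);
+ * 4. same-order contiguous slices take a single-memcpy fast path,
+ *    everything else goes through the element-wise strided copy.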
+ * """ + * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< + * cdef size_t itemsize = src.memview.view.itemsize + * cdef int i + */ + __pyx_v_tmpdata = NULL; + + /* "View.MemoryView":1277 + * """ + * cdef void *tmpdata = NULL + * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< + * cdef int i + * cdef char order = get_best_order(&src, src_ndim) + */ + __pyx_t_1 = __pyx_v_src.memview->view.itemsize; + __pyx_v_itemsize = __pyx_t_1; + + /* "View.MemoryView":1279 + * cdef size_t itemsize = src.memview.view.itemsize + * cdef int i + * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< + * cdef bint broadcasting = False + * cdef bint direct_copy = False + */ + __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); + + /* "View.MemoryView":1280 + * cdef int i + * cdef char order = get_best_order(&src, src_ndim) + * cdef bint broadcasting = False # <<<<<<<<<<<<<< + * cdef bint direct_copy = False + * cdef __Pyx_memviewslice tmp + */ + __pyx_v_broadcasting = 0; + + /* "View.MemoryView":1281 + * cdef char order = get_best_order(&src, src_ndim) + * cdef bint broadcasting = False + * cdef bint direct_copy = False # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice tmp + * + */ + __pyx_v_direct_copy = 0; + + /* "View.MemoryView":1284 + * cdef __Pyx_memviewslice tmp + * + * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< + * broadcast_leading(&src, src_ndim, dst_ndim) + * elif dst_ndim < src_ndim: + */ + __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1285 + * + * if src_ndim < dst_ndim: + * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< + * elif dst_ndim < src_ndim: + * broadcast_leading(&dst, dst_ndim, src_ndim) + */ + __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); + + /* "View.MemoryView":1284 + * cdef __Pyx_memviewslice tmp + * + * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< + * broadcast_leading(&src, src_ndim, dst_ndim) + * elif dst_ndim < src_ndim: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":1286 + * if src_ndim < dst_ndim: + * broadcast_leading(&src, src_ndim, dst_ndim) + * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< + * broadcast_leading(&dst, dst_ndim, src_ndim) + * + */ + __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1287 + * broadcast_leading(&src, src_ndim, dst_ndim) + * elif dst_ndim < src_ndim: + * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< + * + * cdef int ndim = max(src_ndim, dst_ndim) + */ + __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); + + /* "View.MemoryView":1286 + * if src_ndim < dst_ndim: + * broadcast_leading(&src, src_ndim, dst_ndim) + * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< + * broadcast_leading(&dst, dst_ndim, src_ndim) + * + */ + } + __pyx_L3:; + + /* "View.MemoryView":1289 + * broadcast_leading(&dst, dst_ndim, src_ndim) + * + * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< + * + * for i in range(ndim): + */ + __pyx_t_3 = __pyx_v_dst_ndim; + __pyx_t_4 = __pyx_v_src_ndim; + if (((__pyx_t_3 > __pyx_t_4) != 0)) { + __pyx_t_5 = __pyx_t_3; + } else { + __pyx_t_5 = __pyx_t_4; + } + __pyx_v_ndim = __pyx_t_5; + + /* "View.MemoryView":1291 + * cdef int ndim = max(src_ndim, dst_ndim) + * + * for i in range(ndim): # <<<<<<<<<<<<<< + * if src.shape[i] != dst.shape[i]: + * if src.shape[i] == 1: + */ + __pyx_t_5 = __pyx_v_ndim; + __pyx_t_3 = __pyx_t_5; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + 
__pyx_v_i = __pyx_t_4; + + /* "View.MemoryView":1292 + * + * for i in range(ndim): + * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< + * if src.shape[i] == 1: + * broadcasting = True + */ + __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1293 + * for i in range(ndim): + * if src.shape[i] != dst.shape[i]: + * if src.shape[i] == 1: # <<<<<<<<<<<<<< + * broadcasting = True + * src.strides[i] = 0 + */ + __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1294 + * if src.shape[i] != dst.shape[i]: + * if src.shape[i] == 1: + * broadcasting = True # <<<<<<<<<<<<<< + * src.strides[i] = 0 + * else: + */ + __pyx_v_broadcasting = 1; + + /* "View.MemoryView":1295 + * if src.shape[i] == 1: + * broadcasting = True + * src.strides[i] = 0 # <<<<<<<<<<<<<< + * else: + * _err_extents(i, dst.shape[i], src.shape[i]) + */ + (__pyx_v_src.strides[__pyx_v_i]) = 0; + + /* "View.MemoryView":1293 + * for i in range(ndim): + * if src.shape[i] != dst.shape[i]: + * if src.shape[i] == 1: # <<<<<<<<<<<<<< + * broadcasting = True + * src.strides[i] = 0 + */ + goto __pyx_L7; + } + + /* "View.MemoryView":1297 + * src.strides[i] = 0 + * else: + * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< + * + * if src.suboffsets[i] >= 0: + */ + /*else*/ { + __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error) + } + __pyx_L7:; + + /* "View.MemoryView":1292 + * + * for i in range(ndim): + * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< + * if src.shape[i] == 1: + * broadcasting = True + */ + } + + /* "View.MemoryView":1299 + * _err_extents(i, dst.shape[i], src.shape[i]) + * + * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< + * _err_dim(ValueError, "Dimension %d is not direct", i) + * + */ + __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1300 + * + * if src.suboffsets[i] >= 0: + * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< + * + * if slices_overlap(&src, &dst, ndim, itemsize): + */ + __pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1300, __pyx_L1_error) + + /* "View.MemoryView":1299 + * _err_extents(i, dst.shape[i], src.shape[i]) + * + * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< + * _err_dim(ValueError, "Dimension %d is not direct", i) + * + */ + } + } + + /* "View.MemoryView":1302 + * _err_dim(ValueError, "Dimension %d is not direct", i) + * + * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< + * + * if not slice_is_contig(src, order, ndim): + */ + __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1304 + * if slices_overlap(&src, &dst, ndim, itemsize): + * + * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< + * order = get_best_order(&dst, ndim) + * + */ + __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1305 + * + * if not slice_is_contig(src, order, ndim): + * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< + * + * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) + */ + __pyx_v_order = 
__pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); + + /* "View.MemoryView":1304 + * if slices_overlap(&src, &dst, ndim, itemsize): + * + * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< + * order = get_best_order(&dst, ndim) + * + */ + } + + /* "View.MemoryView":1307 + * order = get_best_order(&dst, ndim) + * + * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< + * src = tmp + * + */ + __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(1, 1307, __pyx_L1_error) + __pyx_v_tmpdata = __pyx_t_7; + + /* "View.MemoryView":1308 + * + * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) + * src = tmp # <<<<<<<<<<<<<< + * + * if not broadcasting: + */ + __pyx_v_src = __pyx_v_tmp; + + /* "View.MemoryView":1302 + * _err_dim(ValueError, "Dimension %d is not direct", i) + * + * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< + * + * if not slice_is_contig(src, order, ndim): + */ + } + + /* "View.MemoryView":1310 + * src = tmp + * + * if not broadcasting: # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1313 + * + * + * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< + * direct_copy = slice_is_contig(dst, 'C', ndim) + * elif slice_is_contig(src, 'F', ndim): + */ + __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1314 + * + * if slice_is_contig(src, 'C', ndim): + * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< + * elif slice_is_contig(src, 'F', ndim): + * direct_copy = slice_is_contig(dst, 'F', ndim) + */ + __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); + + /* "View.MemoryView":1313 + * + * + * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< + * direct_copy = slice_is_contig(dst, 'C', ndim) + * elif slice_is_contig(src, 'F', ndim): + */ + goto __pyx_L12; + } + + /* "View.MemoryView":1315 + * if slice_is_contig(src, 'C', ndim): + * direct_copy = slice_is_contig(dst, 'C', ndim) + * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< + * direct_copy = slice_is_contig(dst, 'F', ndim) + * + */ + __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1316 + * direct_copy = slice_is_contig(dst, 'C', ndim) + * elif slice_is_contig(src, 'F', ndim): + * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< + * + * if direct_copy: + */ + __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); + + /* "View.MemoryView":1315 + * if slice_is_contig(src, 'C', ndim): + * direct_copy = slice_is_contig(dst, 'C', ndim) + * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< + * direct_copy = slice_is_contig(dst, 'F', ndim) + * + */ + } + __pyx_L12:; + + /* "View.MemoryView":1318 + * direct_copy = slice_is_contig(dst, 'F', ndim) + * + * if direct_copy: # <<<<<<<<<<<<<< + * + * refcount_copying(&dst, dtype_is_object, ndim, False) + */ + __pyx_t_2 = (__pyx_v_direct_copy != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1320 + * if direct_copy: + * + * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< + * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) + * refcount_copying(&dst, dtype_is_object, ndim, True) + */ + __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); + + 
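/* [reviewer annotation, not part of Cython's generated output]
+ * Direct-copy fast path: both slices are contiguous in the same order,
+ * so the payload is one flat block of itemsize * prod(shape) bytes and
+ * a single memcpy suffices. For object dtypes the refcount_copying
+ * calls bracket the raw copy: drop the references dst currently owns,
+ * blit the pointers, then acquire references to the copied contents.
+ * A hedged Python sketch of the byte-level step (illustrative names):
+ *
+ *     def direct_copy(src_buf, dst_buf, shape, itemsize):
+ *         size = itemsize                  # slice_get_size(), in effect
+ *         for extent in shape:
+ *             size *= extent
+ *         dst_buf[:size] = src_buf[:size]  # the memcpy below
+ */
+
+ 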
/* "View.MemoryView":1321 + * + * refcount_copying(&dst, dtype_is_object, ndim, False) + * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< + * refcount_copying(&dst, dtype_is_object, ndim, True) + * free(tmpdata) + */ + (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim))); + + /* "View.MemoryView":1322 + * refcount_copying(&dst, dtype_is_object, ndim, False) + * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) + * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< + * free(tmpdata) + * return 0 + */ + __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); + + /* "View.MemoryView":1323 + * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) + * refcount_copying(&dst, dtype_is_object, ndim, True) + * free(tmpdata) # <<<<<<<<<<<<<< + * return 0 + * + */ + free(__pyx_v_tmpdata); + + /* "View.MemoryView":1324 + * refcount_copying(&dst, dtype_is_object, ndim, True) + * free(tmpdata) + * return 0 # <<<<<<<<<<<<<< + * + * if order == 'F' == get_best_order(&dst, ndim): + */ + __pyx_r = 0; + goto __pyx_L0; + + /* "View.MemoryView":1318 + * direct_copy = slice_is_contig(dst, 'F', ndim) + * + * if direct_copy: # <<<<<<<<<<<<<< + * + * refcount_copying(&dst, dtype_is_object, ndim, False) + */ + } + + /* "View.MemoryView":1310 + * src = tmp + * + * if not broadcasting: # <<<<<<<<<<<<<< + * + * + */ + } + + /* "View.MemoryView":1326 + * return 0 + * + * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_2 = (__pyx_v_order == 'F'); + if (__pyx_t_2) { + __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); + } + __pyx_t_8 = (__pyx_t_2 != 0); + if (__pyx_t_8) { + + /* "View.MemoryView":1329 + * + * + * transpose_memslice(&src) # <<<<<<<<<<<<<< + * transpose_memslice(&dst) + * + */ + __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1329, __pyx_L1_error) + + /* "View.MemoryView":1330 + * + * transpose_memslice(&src) + * transpose_memslice(&dst) # <<<<<<<<<<<<<< + * + * refcount_copying(&dst, dtype_is_object, ndim, False) + */ + __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1330, __pyx_L1_error) + + /* "View.MemoryView":1326 + * return 0 + * + * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< + * + * + */ + } + + /* "View.MemoryView":1332 + * transpose_memslice(&dst) + * + * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< + * copy_strided_to_strided(&src, &dst, ndim, itemsize) + * refcount_copying(&dst, dtype_is_object, ndim, True) + */ + __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); + + /* "View.MemoryView":1333 + * + * refcount_copying(&dst, dtype_is_object, ndim, False) + * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< + * refcount_copying(&dst, dtype_is_object, ndim, True) + * + */ + copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); + + /* "View.MemoryView":1334 + * refcount_copying(&dst, dtype_is_object, ndim, False) + * copy_strided_to_strided(&src, &dst, ndim, itemsize) + * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< + * + * free(tmpdata) + */ + __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); + + /* "View.MemoryView":1336 + * refcount_copying(&dst, dtype_is_object, ndim, 
True) + * + * free(tmpdata) # <<<<<<<<<<<<<< + * return 0 + * + */ + free(__pyx_v_tmpdata); + + /* "View.MemoryView":1337 + * + * free(tmpdata) + * return 0 # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_broadcast_leading') + */ + __pyx_r = 0; + goto __pyx_L0; + + /* "View.MemoryView":1268 + * + * @cname('__pyx_memoryview_copy_contents') + * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice dst, + * int src_ndim, int dst_ndim, + */ + + /* function exit code */ + __pyx_L1_error:; + { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + } + __pyx_r = -1; + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":1340 + * + * @cname('__pyx_memoryview_broadcast_leading') + * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< + * int ndim, + * int ndim_other) nogil: + */ + +static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { + int __pyx_v_i; + int __pyx_v_offset; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + + /* "View.MemoryView":1344 + * int ndim_other) nogil: + * cdef int i + * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< + * + * for i in range(ndim - 1, -1, -1): + */ + __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); + + /* "View.MemoryView":1346 + * cdef int offset = ndim_other - ndim + * + * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< + * mslice.shape[i + offset] = mslice.shape[i] + * mslice.strides[i + offset] = mslice.strides[i] + */ + for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { + __pyx_v_i = __pyx_t_1; + + /* "View.MemoryView":1347 + * + * for i in range(ndim - 1, -1, -1): + * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< + * mslice.strides[i + offset] = mslice.strides[i] + * mslice.suboffsets[i + offset] = mslice.suboffsets[i] + */ + (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); + + /* "View.MemoryView":1348 + * for i in range(ndim - 1, -1, -1): + * mslice.shape[i + offset] = mslice.shape[i] + * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< + * mslice.suboffsets[i + offset] = mslice.suboffsets[i] + * + */ + (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); + + /* "View.MemoryView":1349 + * mslice.shape[i + offset] = mslice.shape[i] + * mslice.strides[i + offset] = mslice.strides[i] + * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< + * + * for i in range(offset): + */ + (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); + } + + /* "View.MemoryView":1351 + * mslice.suboffsets[i + offset] = mslice.suboffsets[i] + * + * for i in range(offset): # <<<<<<<<<<<<<< + * mslice.shape[i] = 1 + * mslice.strides[i] = mslice.strides[0] + */ + __pyx_t_1 = __pyx_v_offset; + __pyx_t_2 = __pyx_t_1; + for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { + __pyx_v_i = __pyx_t_3; + + /* "View.MemoryView":1352 + * + * for i in range(offset): + * mslice.shape[i] = 1 # <<<<<<<<<<<<<< + * mslice.strides[i] = mslice.strides[0] + * mslice.suboffsets[i] = -1 + */ + (__pyx_v_mslice->shape[__pyx_v_i]) = 1; + + /* "View.MemoryView":1353 + * for i in range(offset): + * mslice.shape[i] = 1 
+ * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< + * mslice.suboffsets[i] = -1 + * + */ + (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); + + /* "View.MemoryView":1354 + * mslice.shape[i] = 1 + * mslice.strides[i] = mslice.strides[0] + * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< + * + * + */ + (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; + } + + /* "View.MemoryView":1340 + * + * @cname('__pyx_memoryview_broadcast_leading') + * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< + * int ndim, + * int ndim_other) nogil: + */ + + /* function exit code */ +} + +/* "View.MemoryView":1362 + * + * @cname('__pyx_memoryview_refcount_copying') + * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< + * int ndim, bint inc) nogil: + * + */ + +static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { + int __pyx_t_1; + + /* "View.MemoryView":1366 + * + * + * if dtype_is_object: # <<<<<<<<<<<<<< + * refcount_objects_in_slice_with_gil(dst.data, dst.shape, + * dst.strides, ndim, inc) + */ + __pyx_t_1 = (__pyx_v_dtype_is_object != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1367 + * + * if dtype_is_object: + * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<< + * dst.strides, ndim, inc) + * + */ + __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); + + /* "View.MemoryView":1366 + * + * + * if dtype_is_object: # <<<<<<<<<<<<<< + * refcount_objects_in_slice_with_gil(dst.data, dst.shape, + * dst.strides, ndim, inc) + */ + } + + /* "View.MemoryView":1362 + * + * @cname('__pyx_memoryview_refcount_copying') + * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< + * int ndim, bint inc) nogil: + * + */ + + /* function exit code */ +} + +/* "View.MemoryView":1371 + * + * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') + * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< + * Py_ssize_t *strides, int ndim, + * bint inc) with gil: + */ + +static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { + __Pyx_RefNannyDeclarations + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); + + /* "View.MemoryView":1374 + * Py_ssize_t *strides, int ndim, + * bint inc) with gil: + * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_refcount_objects_in_slice') + */ + __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); + + /* "View.MemoryView":1371 + * + * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') + * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< + * Py_ssize_t *strides, int ndim, + * bint inc) with gil: + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif +} + +/* "View.MemoryView":1377 + * + * @cname('__pyx_memoryview_refcount_objects_in_slice') + * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< + * 
Py_ssize_t *strides, int ndim, bint inc):
+ * cdef Py_ssize_t i
+ */
+
+static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
+ CYTHON_UNUSED Py_ssize_t __pyx_v_i;
+ __Pyx_RefNannyDeclarations
+ Py_ssize_t __pyx_t_1;
+ Py_ssize_t __pyx_t_2;
+ Py_ssize_t __pyx_t_3;
+ int __pyx_t_4;
+ __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0);
+
+ /* "View.MemoryView":1381
+ * cdef Py_ssize_t i
+ *
+ * for i in range(shape[0]): # <<<<<<<<<<<<<<
+ * if ndim == 1:
+ * if inc:
+ */
+ __pyx_t_1 = (__pyx_v_shape[0]);
+ __pyx_t_2 = __pyx_t_1;
+ for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
+ __pyx_v_i = __pyx_t_3;
+
+ /* "View.MemoryView":1382
+ *
+ * for i in range(shape[0]):
+ * if ndim == 1: # <<<<<<<<<<<<<<
+ * if inc:
+ * Py_INCREF((<PyObject **> data)[0])
+ */
+ __pyx_t_4 = ((__pyx_v_ndim == 1) != 0);
+ if (__pyx_t_4) {
+
+ /* "View.MemoryView":1383
+ * for i in range(shape[0]):
+ * if ndim == 1:
+ * if inc: # <<<<<<<<<<<<<<
+ * Py_INCREF((<PyObject **> data)[0])
+ * else:
+ */
+ __pyx_t_4 = (__pyx_v_inc != 0);
+ if (__pyx_t_4) {
+
+ /* "View.MemoryView":1384
+ * if ndim == 1:
+ * if inc:
+ * Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<<
+ * else:
+ * Py_DECREF((<PyObject **> data)[0])
+ */
+ Py_INCREF((((PyObject **)__pyx_v_data)[0]));
+
+ /* "View.MemoryView":1383
+ * for i in range(shape[0]):
+ * if ndim == 1:
+ * if inc: # <<<<<<<<<<<<<<
+ * Py_INCREF((<PyObject **> data)[0])
+ * else:
+ */
+ goto __pyx_L6;
+ }
+
+ /* "View.MemoryView":1386
+ * Py_INCREF((<PyObject **> data)[0])
+ * else:
+ * Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<<
+ * else:
+ * refcount_objects_in_slice(data, shape + 1, strides + 1,
+ */
+ /*else*/ {
+ Py_DECREF((((PyObject **)__pyx_v_data)[0]));
+ }
+ __pyx_L6:;
+
+ /* "View.MemoryView":1382
+ *
+ * for i in range(shape[0]):
+ * if ndim == 1: # <<<<<<<<<<<<<<
+ * if inc:
+ * Py_INCREF((<PyObject **> data)[0])
+ */
+ goto __pyx_L5;
+ }
+
+ /* "View.MemoryView":1388
+ * Py_DECREF((<PyObject **> data)[0])
+ * else:
+ * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<<
+ * ndim - 1, inc)
+ *
+ */
+ /*else*/ {
+
+ /* "View.MemoryView":1389
+ * else:
+ * refcount_objects_in_slice(data, shape + 1, strides + 1,
+ * ndim - 1, inc) # <<<<<<<<<<<<<<
+ *
+ * data += strides[0]
+ */
+ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc);
+ }
+ __pyx_L5:;
+
+ /* "View.MemoryView":1391
+ * ndim - 1, inc)
+ *
+ * data += strides[0] # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0]));
+ }
+
+ /* "View.MemoryView":1377
+ *
+ * @cname('__pyx_memoryview_refcount_objects_in_slice')
+ * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
+ * Py_ssize_t *strides, int ndim, bint inc):
+ * cdef Py_ssize_t i
+ */
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+}
+
+/* "View.MemoryView":1397
+ *
+ * @cname('__pyx_memoryview_slice_assign_scalar')
+ * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
+ * size_t itemsize, void *item,
+ * bint dtype_is_object) nogil:
+ */
+
+static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) {
+
+ /* "View.MemoryView":1400
+ * size_t itemsize, void *item,
+ * bint dtype_is_object) nogil:
+ * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
+ * _slice_assign_scalar(dst.data, dst.shape, 
dst.strides, ndim, + * itemsize, item) + */ + __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); + + /* "View.MemoryView":1401 + * bint dtype_is_object) nogil: + * refcount_copying(dst, dtype_is_object, ndim, False) + * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< + * itemsize, item) + * refcount_copying(dst, dtype_is_object, ndim, True) + */ + __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); + + /* "View.MemoryView":1403 + * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, + * itemsize, item) + * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< + * + * + */ + __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); + + /* "View.MemoryView":1397 + * + * @cname('__pyx_memoryview_slice_assign_scalar') + * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< + * size_t itemsize, void *item, + * bint dtype_is_object) nogil: + */ + + /* function exit code */ +} + +/* "View.MemoryView":1407 + * + * @cname('__pyx_memoryview__slice_assign_scalar') + * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< + * Py_ssize_t *strides, int ndim, + * size_t itemsize, void *item) nogil: + */ + +static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { + CYTHON_UNUSED Py_ssize_t __pyx_v_i; + Py_ssize_t __pyx_v_stride; + Py_ssize_t __pyx_v_extent; + int __pyx_t_1; + Py_ssize_t __pyx_t_2; + Py_ssize_t __pyx_t_3; + Py_ssize_t __pyx_t_4; + + /* "View.MemoryView":1411 + * size_t itemsize, void *item) nogil: + * cdef Py_ssize_t i + * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< + * cdef Py_ssize_t extent = shape[0] + * + */ + __pyx_v_stride = (__pyx_v_strides[0]); + + /* "View.MemoryView":1412 + * cdef Py_ssize_t i + * cdef Py_ssize_t stride = strides[0] + * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< + * + * if ndim == 1: + */ + __pyx_v_extent = (__pyx_v_shape[0]); + + /* "View.MemoryView":1414 + * cdef Py_ssize_t extent = shape[0] + * + * if ndim == 1: # <<<<<<<<<<<<<< + * for i in range(extent): + * memcpy(data, item, itemsize) + */ + __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1415 + * + * if ndim == 1: + * for i in range(extent): # <<<<<<<<<<<<<< + * memcpy(data, item, itemsize) + * data += stride + */ + __pyx_t_2 = __pyx_v_extent; + __pyx_t_3 = __pyx_t_2; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_v_i = __pyx_t_4; + + /* "View.MemoryView":1416 + * if ndim == 1: + * for i in range(extent): + * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< + * data += stride + * else: + */ + (void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize)); + + /* "View.MemoryView":1417 + * for i in range(extent): + * memcpy(data, item, itemsize) + * data += stride # <<<<<<<<<<<<<< + * else: + * for i in range(extent): + */ + __pyx_v_data = (__pyx_v_data + __pyx_v_stride); + } + + /* "View.MemoryView":1414 + * cdef Py_ssize_t extent = shape[0] + * + * if ndim == 1: # <<<<<<<<<<<<<< + * for i in range(extent): + * memcpy(data, item, itemsize) + */ + goto __pyx_L3; + } + + /* "View.MemoryView":1419 + * data += stride + * else: + * for i in range(extent): # <<<<<<<<<<<<<< + * _slice_assign_scalar(data, shape + 1, strides + 1, + * ndim - 1, 
itemsize, item) + */ + /*else*/ { + __pyx_t_2 = __pyx_v_extent; + __pyx_t_3 = __pyx_t_2; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_v_i = __pyx_t_4; + + /* "View.MemoryView":1420 + * else: + * for i in range(extent): + * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< + * ndim - 1, itemsize, item) + * data += stride + */ + __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); + + /* "View.MemoryView":1422 + * _slice_assign_scalar(data, shape + 1, strides + 1, + * ndim - 1, itemsize, item) + * data += stride # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_data = (__pyx_v_data + __pyx_v_stride); + } + } + __pyx_L3:; + + /* "View.MemoryView":1407 + * + * @cname('__pyx_memoryview__slice_assign_scalar') + * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< + * Py_ssize_t *strides, int ndim, + * size_t itemsize, void *item) nogil: + */ + + /* function exit code */ +} + +/* "(tree fragment)":1 + * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< + * cdef object __pyx_PickleError + * cdef object __pyx_result + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v___pyx_type = 0; + long __pyx_v___pyx_checksum; + PyObject *__pyx_v___pyx_state = 0; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0}; + PyObject* values[3] = {0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error) + } + } else if 
(PyTuple_GET_SIZE(__pyx_args) != 3) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + } + __pyx_v___pyx_type = values[0]; + __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) + __pyx_v___pyx_state = values[2]; + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_v___pyx_PickleError = 0; + PyObject *__pyx_v___pyx_result = 0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + int __pyx_t_6; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0); + + /* "(tree fragment)":4 + * cdef object __pyx_PickleError + * cdef object __pyx_result + * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< + * from pickle import PickleError as __pyx_PickleError + * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) + */ + __pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0); + if (__pyx_t_1) { + + /* "(tree fragment)":5 + * cdef object __pyx_result + * if __pyx_checksum != 0xb068931: + * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< + * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) + * __pyx_result = Enum.__new__(__pyx_type) + */ + __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_INCREF(__pyx_n_s_PickleError); + __Pyx_GIVEREF(__pyx_n_s_PickleError); + PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError); + __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_INCREF(__pyx_t_2); + __pyx_v___pyx_PickleError = __pyx_t_2; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "(tree fragment)":6 + * if __pyx_checksum != 0xb068931: + * from pickle import PickleError as __pyx_PickleError + * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<< + * __pyx_result = Enum.__new__(__pyx_type) + * if __pyx_state is not None: + */ + __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_INCREF(__pyx_v___pyx_PickleError); + __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { + __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); + if (likely(__pyx_t_5)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); + __Pyx_INCREF(__pyx_t_5); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_2, function); + } + } + __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 6, __pyx_L1_error) + + /* "(tree fragment)":4 + * cdef object __pyx_PickleError + * cdef object __pyx_result + * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< + * from pickle import PickleError as __pyx_PickleError + * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) + */ + } + + /* "(tree fragment)":7 + * from pickle import PickleError as __pyx_PickleError + * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) + * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<< + * if __pyx_state is not None: + * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { + __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); + if (likely(__pyx_t_4)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); + __Pyx_INCREF(__pyx_t_4); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_2, function); + } + } + __pyx_t_3 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type);
+ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v___pyx_result = __pyx_t_3;
+ __pyx_t_3 = 0;
+
+ /* "(tree fragment)":8
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
+ * __pyx_result = Enum.__new__(__pyx_type)
+ * if __pyx_state is not None: # <<<<<<<<<<<<<<
+ * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
+ * return __pyx_result
+ */
+ __pyx_t_1 = (__pyx_v___pyx_state != Py_None);
+ __pyx_t_6 = (__pyx_t_1 != 0);
+ if (__pyx_t_6) {
+
+ /* "(tree fragment)":9
+ * __pyx_result = Enum.__new__(__pyx_type)
+ * if __pyx_state is not None:
+ * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<<
+ * return __pyx_result
+ * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
+ */
+ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error)
+ __pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "(tree fragment)":8
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
+ * __pyx_result = Enum.__new__(__pyx_type)
+ * if __pyx_state is not None: # <<<<<<<<<<<<<<
+ * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
+ * return __pyx_result
+ */
+ }
+
+ /* "(tree fragment)":10
+ * if __pyx_state is not None:
+ * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
+ * return __pyx_result # <<<<<<<<<<<<<<
+ * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
+ * __pyx_result.name = __pyx_state[0]
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v___pyx_result);
+ __pyx_r = __pyx_v___pyx_result;
+ goto __pyx_L0;
+
+ /* "(tree fragment)":1
+ * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v___pyx_PickleError);
+ __Pyx_XDECREF(__pyx_v___pyx_result);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "(tree fragment)":11
+ * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
+ * return __pyx_result
+ * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
+ * __pyx_result.name = __pyx_state[0]
+ * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
+ */
+
+static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_t_2;
+ Py_ssize_t __pyx_t_3;
+ int __pyx_t_4;
+ int __pyx_t_5;
+ PyObject 
*__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0); + + /* "(tree fragment)":12 + * return __pyx_result + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): + * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<< + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): + * __pyx_result.__dict__.update(__pyx_state[1]) + */ + if (unlikely(__pyx_v___pyx_state == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); + __PYX_ERR(1, 12, __pyx_L1_error) + } + __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GIVEREF(__pyx_t_1); + __Pyx_GOTREF(__pyx_v___pyx_result->name); + __Pyx_DECREF(__pyx_v___pyx_result->name); + __pyx_v___pyx_result->name = __pyx_t_1; + __pyx_t_1 = 0; + + /* "(tree fragment)":13 + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): + * __pyx_result.name = __pyx_state[0] + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< + * __pyx_result.__dict__.update(__pyx_state[1]) + */ + if (unlikely(__pyx_v___pyx_state == Py_None)) { + PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); + __PYX_ERR(1, 13, __pyx_L1_error) + } + __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error) + __pyx_t_4 = ((__pyx_t_3 > 1) != 0); + if (__pyx_t_4) { + } else { + __pyx_t_2 = __pyx_t_4; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error) + __pyx_t_5 = (__pyx_t_4 != 0); + __pyx_t_2 = __pyx_t_5; + __pyx_L4_bool_binop_done:; + if (__pyx_t_2) { + + /* "(tree fragment)":14 + * __pyx_result.name = __pyx_state[0] + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): + * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<< + */ + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + if (unlikely(__pyx_v___pyx_state == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); + __PYX_ERR(1, 14, __pyx_L1_error) + } + __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_8 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { + __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); + if (likely(__pyx_t_8)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); + __Pyx_INCREF(__pyx_t_8); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_7, function); + } + } + __pyx_t_1 = (__pyx_t_8) ? 
__Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6);
+ __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "(tree fragment)":13
+ * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
+ * __pyx_result.name = __pyx_state[0]
+ * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
+ * __pyx_result.__dict__.update(__pyx_state[1])
+ */
+ }
+
+ /* "(tree fragment)":11
+ * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
+ * return __pyx_result
+ * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
+ * __pyx_result.name = __pyx_state[0]
+ * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_XDECREF(__pyx_t_8);
+ __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+static struct __pyx_vtabstruct_array __pyx_vtable_array;
+
+static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) {
+ struct __pyx_array_obj *p;
+ PyObject *o;
+ if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
+ o = (*t->tp_alloc)(t, 0);
+ } else {
+ o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
+ }
+ if (unlikely(!o)) return 0;
+ p = ((struct __pyx_array_obj *)o);
+ p->__pyx_vtab = __pyx_vtabptr_array;
+ p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None);
+ p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None);
+ if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad;
+ return o;
+ bad:
+ Py_DECREF(o); o = 0;
+ return NULL;
+}
+
+static void __pyx_tp_dealloc_array(PyObject *o) {
+ struct __pyx_array_obj *p = (struct __pyx_array_obj *)o;
+ #if CYTHON_USE_TP_FINALIZE
+ if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) {
+ if (PyObject_CallFinalizerFromDealloc(o)) return;
+ }
+ #endif
+ {
+ PyObject *etype, *eval, *etb;
+ PyErr_Fetch(&etype, &eval, &etb);
+ __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
+ __pyx_array___dealloc__(o);
+ __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
+ PyErr_Restore(etype, eval, etb);
+ }
+ Py_CLEAR(p->mode);
+ Py_CLEAR(p->_format);
+ (*Py_TYPE(o)->tp_free)(o);
+}
+static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) {
+ PyObject *r;
+ PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;
+ r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
+ Py_DECREF(x);
+ return r;
+}
+
+static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) {
+ if (v) {
+ return __pyx_array___setitem__(o, i, v);
+ }
+ else {
+ PyErr_Format(PyExc_NotImplementedError,
+ "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
+ return -1;
+ }
+}
+
+static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) {
+ PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n);
+ if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {
+ PyErr_Clear();
+ v = __pyx_array___getattr__(o, n);
+ }
+ return v;
+}
+
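+/* [Editor's note: illustrative sketch only -- not Cython output and not part
+ * of this patch. It restates the slot-delegation pattern used by
+ * __pyx_sq_item_array above: the sequence protocol hands the slot a raw C
+ * index, but the type only implements the mapping protocol, so the index is
+ * boxed into a Python int and routed through mp_subscript. The demo_* name
+ * is hypothetical; only the stable CPython C API from Python.h is assumed,
+ * and the #if 0 guard keeps the translation unit unchanged.] */
+#if 0
+static PyObject *demo_sq_item_via_mapping(PyObject *o, Py_ssize_t i) {
+ PyObject *r;
+ PyObject *key = PyLong_FromSsize_t(i); /* box the C index */
+ if (!key) return NULL;
+ /* delegate to the mapping slot, as __pyx_sq_item_array does */
+ r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, key);
+ Py_DECREF(key);
+ return r;
+}
+#endif
+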
+static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); +} + +static PyMethodDef __pyx_methods_array[] = { + {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, + {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0}, + {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0}, + {0, 0, 0, 0} +}; + +static struct PyGetSetDef __pyx_getsets_array[] = { + {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, + {0, 0, 0, 0, 0} +}; + +static PySequenceMethods __pyx_tp_as_sequence_array = { + __pyx_array___len__, /*sq_length*/ + 0, /*sq_concat*/ + 0, /*sq_repeat*/ + __pyx_sq_item_array, /*sq_item*/ + 0, /*sq_slice*/ + 0, /*sq_ass_item*/ + 0, /*sq_ass_slice*/ + 0, /*sq_contains*/ + 0, /*sq_inplace_concat*/ + 0, /*sq_inplace_repeat*/ +}; + +static PyMappingMethods __pyx_tp_as_mapping_array = { + __pyx_array___len__, /*mp_length*/ + __pyx_array___getitem__, /*mp_subscript*/ + __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ +}; + +static PyBufferProcs __pyx_tp_as_buffer_array = { + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getreadbuffer*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getwritebuffer*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getsegcount*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getcharbuffer*/ + #endif + __pyx_array_getbuffer, /*bf_getbuffer*/ + 0, /*bf_releasebuffer*/ +}; + +static PyTypeObject __pyx_type___pyx_array = { + PyVarObject_HEAD_INIT(0, 0) + "monotonic_align.core.array", /*tp_name*/ + sizeof(struct __pyx_array_obj), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc_array, /*tp_dealloc*/ + #if PY_VERSION_HEX < 0x030800b4 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 + 0, /*tp_vectorcall_offset*/ + #endif + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + #if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ + #endif + #if PY_MAJOR_VERSION >= 3 + 0, /*tp_as_async*/ + #endif + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ + &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + __pyx_tp_getattro_array, /*tp_getattro*/ + 0, /*tp_setattro*/ + &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ + 0, /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_methods_array, /*tp_methods*/ + 0, /*tp_members*/ + __pyx_getsets_array, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new_array, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ + #if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ + #endif + #if PY_VERSION_HEX >= 0x030800b1 + 0, /*tp_vectorcall*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 + 0, /*tp_print*/ + #endif +}; + +static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { + struct __pyx_MemviewEnum_obj *p; + PyObject *o; + if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { + o = (*t->tp_alloc)(t, 0); + } else { + o 
= (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); + } + if (unlikely(!o)) return 0; + p = ((struct __pyx_MemviewEnum_obj *)o); + p->name = Py_None; Py_INCREF(Py_None); + return o; +} + +static void __pyx_tp_dealloc_Enum(PyObject *o) { + struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; + #if CYTHON_USE_TP_FINALIZE + if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { + if (PyObject_CallFinalizerFromDealloc(o)) return; + } + #endif + PyObject_GC_UnTrack(o); + Py_CLEAR(p->name); + (*Py_TYPE(o)->tp_free)(o); +} + +static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { + int e; + struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; + if (p->name) { + e = (*v)(p->name, a); if (e) return e; + } + return 0; +} + +static int __pyx_tp_clear_Enum(PyObject *o) { + PyObject* tmp; + struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; + tmp = ((PyObject*)p->name); + p->name = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + return 0; +} + +static PyMethodDef __pyx_methods_Enum[] = { + {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0}, + {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0}, + {0, 0, 0, 0} +}; + +static PyTypeObject __pyx_type___pyx_MemviewEnum = { + PyVarObject_HEAD_INIT(0, 0) + "monotonic_align.core.Enum", /*tp_name*/ + sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc_Enum, /*tp_dealloc*/ + #if PY_VERSION_HEX < 0x030800b4 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 + 0, /*tp_vectorcall_offset*/ + #endif + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + #if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ + #endif + #if PY_MAJOR_VERSION >= 3 + 0, /*tp_as_async*/ + #endif + __pyx_MemviewEnum___repr__, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + 0, /*tp_doc*/ + __pyx_tp_traverse_Enum, /*tp_traverse*/ + __pyx_tp_clear_Enum, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_methods_Enum, /*tp_methods*/ + 0, /*tp_members*/ + 0, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + __pyx_MemviewEnum___init__, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new_Enum, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ + #if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ + #endif + #if PY_VERSION_HEX >= 0x030800b1 + 0, /*tp_vectorcall*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 + 0, /*tp_print*/ + #endif +}; +static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; + +static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { + struct __pyx_memoryview_obj *p; + PyObject *o; + if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { + o = (*t->tp_alloc)(t, 0); + } else { + o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); + } + if (unlikely(!o)) return 0; + p = ((struct 
__pyx_memoryview_obj *)o); + p->__pyx_vtab = __pyx_vtabptr_memoryview; + p->obj = Py_None; Py_INCREF(Py_None); + p->_size = Py_None; Py_INCREF(Py_None); + p->_array_interface = Py_None; Py_INCREF(Py_None); + p->view.obj = NULL; + if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; + return o; + bad: + Py_DECREF(o); o = 0; + return NULL; +} + +static void __pyx_tp_dealloc_memoryview(PyObject *o) { + struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; + #if CYTHON_USE_TP_FINALIZE + if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { + if (PyObject_CallFinalizerFromDealloc(o)) return; + } + #endif + PyObject_GC_UnTrack(o); + { + PyObject *etype, *eval, *etb; + PyErr_Fetch(&etype, &eval, &etb); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); + __pyx_memoryview___dealloc__(o); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); + PyErr_Restore(etype, eval, etb); + } + Py_CLEAR(p->obj); + Py_CLEAR(p->_size); + Py_CLEAR(p->_array_interface); + (*Py_TYPE(o)->tp_free)(o); +} + +static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { + int e; + struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; + if (p->obj) { + e = (*v)(p->obj, a); if (e) return e; + } + if (p->_size) { + e = (*v)(p->_size, a); if (e) return e; + } + if (p->_array_interface) { + e = (*v)(p->_array_interface, a); if (e) return e; + } + if (p->view.obj) { + e = (*v)(p->view.obj, a); if (e) return e; + } + return 0; +} + +static int __pyx_tp_clear_memoryview(PyObject *o) { + PyObject* tmp; + struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; + tmp = ((PyObject*)p->obj); + p->obj = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_size); + p->_size = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_array_interface); + p->_array_interface = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + Py_CLEAR(p->view.obj); + return 0; +} +static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { + PyObject *r; + PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; + r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); + Py_DECREF(x); + return r; +} + +static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { + if (v) { + return __pyx_memoryview___setitem__(o, i, v); + } + else { + PyErr_Format(PyExc_NotImplementedError, + "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); + return -1; + } +} + +static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); +} + +static 
PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); +} + +static PyMethodDef __pyx_methods_memoryview[] = { + {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, + {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, + {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, + {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, + {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0}, + {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0}, + {0, 0, 0, 0} +}; + +static struct PyGetSetDef __pyx_getsets_memoryview[] = { + {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, + {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, + {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, + {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, + {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, + {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, + {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, + {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, + {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, + {0, 0, 0, 0, 0} +}; + +static PySequenceMethods __pyx_tp_as_sequence_memoryview = { + __pyx_memoryview___len__, /*sq_length*/ + 0, /*sq_concat*/ + 0, /*sq_repeat*/ + __pyx_sq_item_memoryview, /*sq_item*/ + 0, /*sq_slice*/ + 0, /*sq_ass_item*/ + 0, /*sq_ass_slice*/ + 0, /*sq_contains*/ + 0, /*sq_inplace_concat*/ + 0, /*sq_inplace_repeat*/ +}; + +static PyMappingMethods __pyx_tp_as_mapping_memoryview = { + __pyx_memoryview___len__, /*mp_length*/ + __pyx_memoryview___getitem__, /*mp_subscript*/ + __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ +}; + +static PyBufferProcs __pyx_tp_as_buffer_memoryview = { + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getreadbuffer*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getwritebuffer*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getsegcount*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getcharbuffer*/ + #endif + __pyx_memoryview_getbuffer, /*bf_getbuffer*/ + 0, /*bf_releasebuffer*/ +}; + +static PyTypeObject __pyx_type___pyx_memoryview = { + PyVarObject_HEAD_INIT(0, 0) + "monotonic_align.core.memoryview", /*tp_name*/ + sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ + #if PY_VERSION_HEX < 0x030800b4 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 + 0, /*tp_vectorcall_offset*/ + #endif + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + #if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ + #endif + #if PY_MAJOR_VERSION >= 3 + 0, /*tp_as_async*/ + #endif + __pyx_memoryview___repr__, /*tp_repr*/ + 0, /*tp_as_number*/ + &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ + &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + __pyx_memoryview___str__, 
/*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + 0, /*tp_doc*/ + __pyx_tp_traverse_memoryview, /*tp_traverse*/ + __pyx_tp_clear_memoryview, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_methods_memoryview, /*tp_methods*/ + 0, /*tp_members*/ + __pyx_getsets_memoryview, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new_memoryview, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ + #if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ + #endif + #if PY_VERSION_HEX >= 0x030800b1 + 0, /*tp_vectorcall*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 + 0, /*tp_print*/ + #endif +}; +static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; + +static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { + struct __pyx_memoryviewslice_obj *p; + PyObject *o = __pyx_tp_new_memoryview(t, a, k); + if (unlikely(!o)) return 0; + p = ((struct __pyx_memoryviewslice_obj *)o); + p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; + p->from_object = Py_None; Py_INCREF(Py_None); + p->from_slice.memview = NULL; + return o; +} + +static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { + struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; + #if CYTHON_USE_TP_FINALIZE + if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { + if (PyObject_CallFinalizerFromDealloc(o)) return; + } + #endif + PyObject_GC_UnTrack(o); + { + PyObject *etype, *eval, *etb; + PyErr_Fetch(&etype, &eval, &etb); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); + __pyx_memoryviewslice___dealloc__(o); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); + PyErr_Restore(etype, eval, etb); + } + Py_CLEAR(p->from_object); + PyObject_GC_Track(o); + __pyx_tp_dealloc_memoryview(o); +} + +static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { + int e; + struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; + e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; + if (p->from_object) { + e = (*v)(p->from_object, a); if (e) return e; + } + return 0; +} + +static int __pyx_tp_clear__memoryviewslice(PyObject *o) { + PyObject* tmp; + struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; + __pyx_tp_clear_memoryview(o); + tmp = ((PyObject*)p->from_object); + p->from_object = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + __PYX_XDEC_MEMVIEW(&p->from_slice, 1); + return 0; +} + +static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); +} + +static PyMethodDef __pyx_methods__memoryviewslice[] = { + {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0}, + {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0}, + {0, 0, 0, 0} +}; + +static struct PyGetSetDef 
__pyx_getsets__memoryviewslice[] = { + {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, + {0, 0, 0, 0, 0} +}; + +static PyTypeObject __pyx_type___pyx_memoryviewslice = { + PyVarObject_HEAD_INIT(0, 0) + "monotonic_align.core._memoryviewslice", /*tp_name*/ + sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ + #if PY_VERSION_HEX < 0x030800b4 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 + 0, /*tp_vectorcall_offset*/ + #endif + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + #if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ + #endif + #if PY_MAJOR_VERSION >= 3 + 0, /*tp_as_async*/ + #endif + #if CYTHON_COMPILING_IN_PYPY + __pyx_memoryview___repr__, /*tp_repr*/ + #else + 0, /*tp_repr*/ + #endif + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + #if CYTHON_COMPILING_IN_PYPY + __pyx_memoryview___str__, /*tp_str*/ + #else + 0, /*tp_str*/ + #endif + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + "Internal class for passing memoryview slices to Python", /*tp_doc*/ + __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ + __pyx_tp_clear__memoryviewslice, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_methods__memoryviewslice, /*tp_methods*/ + 0, /*tp_members*/ + __pyx_getsets__memoryviewslice, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new__memoryviewslice, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ + #if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ + #endif + #if PY_VERSION_HEX >= 0x030800b1 + 0, /*tp_vectorcall*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 + 0, /*tp_print*/ + #endif +}; + +static PyMethodDef __pyx_methods[] = { + {"maximum_path_c", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15monotonic_align_4core_1maximum_path_c, METH_VARARGS|METH_KEYWORDS, 0}, + {0, 0, 0, 0} +}; + +#if PY_MAJOR_VERSION >= 3 +#if CYTHON_PEP489_MULTI_PHASE_INIT +static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ +static int __pyx_pymod_exec_core(PyObject* module); /*proto*/ +static PyModuleDef_Slot __pyx_moduledef_slots[] = { + {Py_mod_create, (void*)__pyx_pymod_create}, + {Py_mod_exec, (void*)__pyx_pymod_exec_core}, + {0, NULL} +}; +#endif + +static struct PyModuleDef __pyx_moduledef = { + PyModuleDef_HEAD_INIT, + "core", + 0, /* m_doc */ + #if CYTHON_PEP489_MULTI_PHASE_INIT + 0, /* m_size */ + #else + -1, /* m_size */ + #endif + __pyx_methods /* m_methods */, + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_moduledef_slots, /* m_slots */ + #else + NULL, /* m_reload */ + #endif + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL /* m_free */ +}; +#endif +#ifndef CYTHON_SMALL_CODE +#if defined(__clang__) + #define CYTHON_SMALL_CODE +#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) + #define CYTHON_SMALL_CODE __attribute__((cold)) +#else + #define CYTHON_SMALL_CODE +#endif +#endif + +static __Pyx_StringTabEntry __pyx_string_tab[] = { + {&__pyx_n_s_ASCII, __pyx_k_ASCII, 
sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, + {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, + {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, + {&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0}, + {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0}, + {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, + {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, + {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, + {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0}, + {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, + {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, + {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, + {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, + {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, + {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, + {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, + {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, + {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, + {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, + {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, + {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, + {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, + {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, + {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, + {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, + {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, + {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, + {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, + {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, + {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, + {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, + {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 
1, 1}, + {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, + {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, + {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, + {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, + {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, + {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, + {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, + {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, + {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, + {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, + {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, + {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, + {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, + {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, + {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, + {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, + {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, + {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, + {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, + {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, + {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, + {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, + {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, + {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, + {&__pyx_n_s_paths, __pyx_k_paths, sizeof(__pyx_k_paths), 0, 0, 1, 1}, + {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, + {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, + {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, + {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, + {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, + {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, + {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, + {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, + {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 
1}, + {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, + {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, + {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, + {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, + {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, + {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, + {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, + {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, + {&__pyx_n_s_t_xs, __pyx_k_t_xs, sizeof(__pyx_k_t_xs), 0, 0, 1, 1}, + {&__pyx_n_s_t_ys, __pyx_k_t_ys, sizeof(__pyx_k_t_ys), 0, 0, 1, 1}, + {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, + {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, + {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, + {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, + {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, + {&__pyx_n_s_values, __pyx_k_values, sizeof(__pyx_k_values), 0, 0, 1, 1}, + {0, 0, 0, 0, 0, 0, 0} +}; +static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { + __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 15, __pyx_L1_error) + __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error) + __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error) + __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error) + __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error) + __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 404, __pyx_L1_error) + __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 613, __pyx_L1_error) + __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 832, __pyx_L1_error) + return 0; + __pyx_L1_error:; + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); + + /* "View.MemoryView":133 + * + * if not self.ndim: + * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< + * + * if itemsize <= 0: + */ + __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 133, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__2); + __Pyx_GIVEREF(__pyx_tuple__2); + + /* "View.MemoryView":136 + * + * if itemsize <= 0: + * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< + * + * if not isinstance(format, bytes): + */ + __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 136, __pyx_L1_error) + 
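+ /* Each __pyx_tuple__N built in this function is the pre-packed argument
+  * tuple for one constant exception message from the .pyx source, created
+  * once at module init so the raise site does no allocation. A simplified
+  * sketch of the pattern (error handling elided; it mirrors the __Pyx_Raise
+  * helper further down in this file):
+  *
+  *     PyObject *args = PyTuple_Pack(1, msg);                  // module init
+  *     PyObject *exc  = PyObject_Call(PyExc_ValueError, args, NULL);
+  *     PyErr_SetObject(PyExc_ValueError, exc);                 // raise site
+  */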
__Pyx_GOTREF(__pyx_tuple__3); + __Pyx_GIVEREF(__pyx_tuple__3); + + /* "View.MemoryView":148 + * + * if not self._shape: + * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< + * + * + */ + __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 148, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__4); + __Pyx_GIVEREF(__pyx_tuple__4); + + /* "View.MemoryView":176 + * self.data = malloc(self.len) + * if not self.data: + * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< + * + * if self.dtype_is_object: + */ + __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 176, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__5); + __Pyx_GIVEREF(__pyx_tuple__5); + + /* "View.MemoryView":192 + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * if not (flags & bufmode): + * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< + * info.buf = self.data + * info.len = self.len + */ + __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 192, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__6); + __Pyx_GIVEREF(__pyx_tuple__6); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__7); + __Pyx_GIVEREF(__pyx_tuple__7); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__8); + __Pyx_GIVEREF(__pyx_tuple__8); + + /* "View.MemoryView":418 + * def __setitem__(memoryview self, object index, object value): + * if self.view.readonly: + * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< + * + * have_slices, index = _unellipsify(index, self.view.ndim) + */ + __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 418, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__9); + __Pyx_GIVEREF(__pyx_tuple__9); + + /* "View.MemoryView":495 + * result = struct.unpack(self.view.format, bytesitem) + * except struct.error: + * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< + * else: + * if len(self.view.format) == 1: + */ + __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 495, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__10); + __Pyx_GIVEREF(__pyx_tuple__10); + + /* "View.MemoryView":520 + * def __getbuffer__(self, Py_buffer *info, int flags): + * if flags & PyBUF_WRITABLE and self.view.readonly: + * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< + * + * if flags & PyBUF_ND: + */ + __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if 
(unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 520, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__11); + __Pyx_GIVEREF(__pyx_tuple__11); + + /* "View.MemoryView":570 + * if self.view.strides == NULL: + * + * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< + * + * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) + */ + __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 570, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__12); + __Pyx_GIVEREF(__pyx_tuple__12); + + /* "View.MemoryView":577 + * def suboffsets(self): + * if self.view.suboffsets == NULL: + * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< + * + * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) + */ + __pyx_tuple__13 = PyTuple_New(1); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 577, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__13); + __Pyx_INCREF(__pyx_int_neg_1); + __Pyx_GIVEREF(__pyx_int_neg_1); + PyTuple_SET_ITEM(__pyx_tuple__13, 0, __pyx_int_neg_1); + __Pyx_GIVEREF(__pyx_tuple__13); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__14); + __Pyx_GIVEREF(__pyx_tuple__14); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__15); + __Pyx_GIVEREF(__pyx_tuple__15); + + /* "View.MemoryView":682 + * if item is Ellipsis: + * if not seen_ellipsis: + * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< + * seen_ellipsis = True + * else: + */ + __pyx_slice__16 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__16)) __PYX_ERR(1, 682, __pyx_L1_error) + __Pyx_GOTREF(__pyx_slice__16); + __Pyx_GIVEREF(__pyx_slice__16); + + /* "View.MemoryView":703 + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: + * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< + * + * + */ + __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 703, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__17); + __Pyx_GIVEREF(__pyx_tuple__17); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__18); + __Pyx_GIVEREF(__pyx_tuple__18); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # 
<<<<<<<<<<<<<< + */ + __pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__19); + __Pyx_GIVEREF(__pyx_tuple__19); + + /* "View.MemoryView":286 + * return self.name + * + * cdef generic = Enum("") # <<<<<<<<<<<<<< + * cdef strided = Enum("") # default + * cdef indirect = Enum("") + */ + __pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(1, 286, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__20); + __Pyx_GIVEREF(__pyx_tuple__20); + + /* "View.MemoryView":287 + * + * cdef generic = Enum("") + * cdef strided = Enum("") # default # <<<<<<<<<<<<<< + * cdef indirect = Enum("") + * + */ + __pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(1, 287, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__21); + __Pyx_GIVEREF(__pyx_tuple__21); + + /* "View.MemoryView":288 + * cdef generic = Enum("") + * cdef strided = Enum("") # default + * cdef indirect = Enum("") # <<<<<<<<<<<<<< + * + * + */ + __pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 288, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__22); + __Pyx_GIVEREF(__pyx_tuple__22); + + /* "View.MemoryView":291 + * + * + * cdef contiguous = Enum("") # <<<<<<<<<<<<<< + * cdef indirect_contiguous = Enum("") + * + */ + __pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 291, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__23); + __Pyx_GIVEREF(__pyx_tuple__23); + + /* "View.MemoryView":292 + * + * cdef contiguous = Enum("") + * cdef indirect_contiguous = Enum("") # <<<<<<<<<<<<<< + * + * + */ + __pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 292, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__24); + __Pyx_GIVEREF(__pyx_tuple__24); + + /* "(tree fragment)":1 + * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< + * cdef object __pyx_PickleError + * cdef object __pyx_result + */ + __pyx_tuple__25 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__25); + __Pyx_GIVEREF(__pyx_tuple__25); + __pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) __PYX_ERR(1, 1, __pyx_L1_error) + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { + /* InitThreads.init */ + #ifdef WITH_THREAD +PyEval_InitThreads(); +#endif + +if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) + + if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_neg_1 = 
PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) + return 0; + __pyx_L1_error:; + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ + +static int __Pyx_modinit_global_init_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); + /*--- Global init code ---*/ + generic = Py_None; Py_INCREF(Py_None); + strided = Py_None; Py_INCREF(Py_None); + indirect = Py_None; Py_INCREF(Py_None); + contiguous = Py_None; Py_INCREF(Py_None); + indirect_contiguous = Py_None; Py_INCREF(Py_None); + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_variable_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); + /*--- Variable export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); + /*--- Function export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_type_init_code(void) { + __Pyx_RefNannyDeclarations + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); + /*--- Type init code ---*/ + __pyx_vtabptr_array = &__pyx_vtable_array; + __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; + if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) + #if PY_VERSION_HEX < 0x030800B1 + __pyx_type___pyx_array.tp_print = 0; + #endif + if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) + if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) + __pyx_array_type = &__pyx_type___pyx_array; + if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) + #if PY_VERSION_HEX < 0x030800B1 + __pyx_type___pyx_MemviewEnum.tp_print = 0; + #endif + if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) { + __pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr; + } + if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) + __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; + __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; + __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; + __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; + __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; + 
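+ /* The vtable assignments around this point are how Cython dispatches cdef
+  * methods: each extension type gets a plain C struct of function pointers,
+  * and __Pyx_SetVtable() stashes a pointer to it under "__pyx_vtable__" in
+  * the type's tp_dict so subclasses can inherit it. A minimal sketch of the
+  * same mechanism (names hypothetical):
+  *
+  *     struct Vtab { PyObject *(*get_item)(PyObject *self); };
+  *     static struct Vtab vtab = { my_get_item };
+  *     // wrapped in a capsule and stored in tp_dict by __Pyx_SetVtable()
+  */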
__pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; + __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; + __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; + __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; + if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) + #if PY_VERSION_HEX < 0x030800B1 + __pyx_type___pyx_memoryview.tp_print = 0; + #endif + if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) { + __pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr; + } + if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) + if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) + __pyx_memoryview_type = &__pyx_type___pyx_memoryview; + __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; + __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; + __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; + __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; + __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; + if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) + #if PY_VERSION_HEX < 0x030800B1 + __pyx_type___pyx_memoryviewslice.tp_print = 0; + #endif + if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) { + __pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr; + } + if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) + if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) + __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} + +static int __Pyx_modinit_type_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); + /*--- Type import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_variable_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); + /*--- Variable import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); + /*--- Function import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + + +#ifndef 
CYTHON_NO_PYINIT_EXPORT +#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC +#elif PY_MAJOR_VERSION < 3 +#ifdef __cplusplus +#define __Pyx_PyMODINIT_FUNC extern "C" void +#else +#define __Pyx_PyMODINIT_FUNC void +#endif +#else +#ifdef __cplusplus +#define __Pyx_PyMODINIT_FUNC extern "C" PyObject * +#else +#define __Pyx_PyMODINIT_FUNC PyObject * +#endif +#endif + + +#if PY_MAJOR_VERSION < 3 +__Pyx_PyMODINIT_FUNC initcore(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC initcore(void) +#else +__Pyx_PyMODINIT_FUNC PyInit_core(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC PyInit_core(void) +#if CYTHON_PEP489_MULTI_PHASE_INIT +{ + return PyModuleDef_Init(&__pyx_moduledef); +} +static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { + #if PY_VERSION_HEX >= 0x030700A1 + static PY_INT64_T main_interpreter_id = -1; + PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); + if (main_interpreter_id == -1) { + main_interpreter_id = current_id; + return (unlikely(current_id == -1)) ? -1 : 0; + } else if (unlikely(main_interpreter_id != current_id)) + #else + static PyInterpreterState *main_interpreter = NULL; + PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; + if (!main_interpreter) { + main_interpreter = current_interpreter; + } else if (unlikely(main_interpreter != current_interpreter)) + #endif + { + PyErr_SetString( + PyExc_ImportError, + "Interpreter change detected - this module can only be loaded into one interpreter per process."); + return -1; + } + return 0; +} +static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { + PyObject *value = PyObject_GetAttrString(spec, from_name); + int result = 0; + if (likely(value)) { + if (allow_none || value != Py_None) { + result = PyDict_SetItemString(moddict, to_name, value); + } + Py_DECREF(value); + } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Clear(); + } else { + result = -1; + } + return result; +} +static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { + PyObject *module = NULL, *moddict, *modname; + if (__Pyx_check_single_interpreter()) + return NULL; + if (__pyx_m) + return __Pyx_NewRef(__pyx_m); + modname = PyObject_GetAttrString(spec, "name"); + if (unlikely(!modname)) goto bad; + module = PyModule_NewObject(modname); + Py_DECREF(modname); + if (unlikely(!module)) goto bad; + moddict = PyModule_GetDict(module); + if (unlikely(!moddict)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; + return module; +bad: + Py_XDECREF(module); + return NULL; +} + + +static CYTHON_SMALL_CODE int __pyx_pymod_exec_core(PyObject *__pyx_pyinit_module) +#endif +#endif +{ + PyObject *__pyx_t_1 = NULL; + static PyThread_type_lock __pyx_t_2[8]; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannyDeclarations + #if CYTHON_PEP489_MULTI_PHASE_INIT + if (__pyx_m) { + if (__pyx_m == __pyx_pyinit_module) return 0; + PyErr_SetString(PyExc_RuntimeError, "Module 'core' has already been imported. 
Re-initialisation is not supported."); + return -1; + } + #elif PY_MAJOR_VERSION >= 3 + if (__pyx_m) return __Pyx_NewRef(__pyx_m); + #endif + #if CYTHON_REFNANNY +__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); +if (!__Pyx_RefNanny) { + PyErr_Clear(); + __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); + if (!__Pyx_RefNanny) + Py_FatalError("failed to import 'refnanny' module"); +} +#endif + __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_core(void)", 0); + if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #ifdef __Pxy_PyFrame_Initialize_Offsets + __Pxy_PyFrame_Initialize_Offsets(); + #endif + __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) + #ifdef __Pyx_CyFunction_USED + if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_FusedFunction_USED + if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Coroutine_USED + if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Generator_USED + if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_AsyncGen_USED + if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_StopAsyncIteration_USED + if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + /*--- Library function declarations ---*/ + /*--- Threads initialization code ---*/ + #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS + #ifdef WITH_THREAD /* Python build with threading support? */ + PyEval_InitThreads(); + #endif + #endif + /*--- Module creation code ---*/ + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_m = __pyx_pyinit_module; + Py_INCREF(__pyx_m); + #else + #if PY_MAJOR_VERSION < 3 + __pyx_m = Py_InitModule4("core", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); + #else + __pyx_m = PyModule_Create(&__pyx_moduledef); + #endif + if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_d); + __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_b); + __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_cython_runtime); + if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + /*--- Initialize various global constants etc. 
---*/ + if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) + if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + if (__pyx_module_is_main_monotonic_align__core) { + if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + } + #if PY_MAJOR_VERSION >= 3 + { + PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) + if (!PyDict_GetItemString(modules, "monotonic_align.core")) { + if (unlikely(PyDict_SetItemString(modules, "monotonic_align.core", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) + } + } + #endif + /*--- Builtin init code ---*/ + if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Constants init code ---*/ + if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Global type/function init code ---*/ + (void)__Pyx_modinit_global_init_code(); + (void)__Pyx_modinit_variable_export_code(); + (void)__Pyx_modinit_function_export_code(); + if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) + (void)__Pyx_modinit_type_import_code(); + (void)__Pyx_modinit_variable_import_code(); + (void)__Pyx_modinit_function_import_code(); + /*--- Execution code ---*/ + #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) + if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + + /* "monotonic_align/core.pyx":7 + * @cython.boundscheck(False) + * @cython.wraparound(False) + * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< + * cdef int x + * cdef int y + */ + __pyx_k_ = (-1e9); + + /* "monotonic_align/core.pyx":1 + * cimport cython # <<<<<<<<<<<<<< + * from cython.parallel import prange + * + */ + __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "View.MemoryView":209 + * info.obj = self + * + * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< + * + * def __dealloc__(array self): + */ + __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 209, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 209, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + PyType_Modified(__pyx_array_type); + + /* "View.MemoryView":286 + * return self.name + * + * cdef generic = Enum("") # <<<<<<<<<<<<<< + * cdef strided = Enum("") # default + * cdef indirect = Enum("") + */ + __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 286, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_XGOTREF(generic); + __Pyx_DECREF_SET(generic, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_1); + __pyx_t_1 = 0; + + /* "View.MemoryView":287 + * + * cdef generic = Enum("") + * cdef strided = Enum("") # default # <<<<<<<<<<<<<< + * cdef indirect = Enum("") + * + */ + __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__21, NULL); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(1, 287, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_XGOTREF(strided); + __Pyx_DECREF_SET(strided, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_1); + __pyx_t_1 = 0; + + /* "View.MemoryView":288 + * cdef generic = Enum("") + * cdef strided = Enum("") # default + * cdef indirect = Enum("") # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 288, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_XGOTREF(indirect); + __Pyx_DECREF_SET(indirect, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_1); + __pyx_t_1 = 0; + + /* "View.MemoryView":291 + * + * + * cdef contiguous = Enum("") # <<<<<<<<<<<<<< + * cdef indirect_contiguous = Enum("") + * + */ + __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 291, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_XGOTREF(contiguous); + __Pyx_DECREF_SET(contiguous, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_1); + __pyx_t_1 = 0; + + /* "View.MemoryView":292 + * + * cdef contiguous = Enum("") + * cdef indirect_contiguous = Enum("") # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 292, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_XGOTREF(indirect_contiguous); + __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_1); + __pyx_t_1 = 0; + + /* "View.MemoryView":316 + * + * DEF THREAD_LOCKS_PREALLOCATED = 8 + * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< + * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ + * PyThread_allocate_lock(), + */ + __pyx_memoryview_thread_locks_used = 0; + + /* "View.MemoryView":317 + * DEF THREAD_LOCKS_PREALLOCATED = 8 + * cdef int __pyx_memoryview_thread_locks_used = 0 + * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< + * PyThread_allocate_lock(), + * PyThread_allocate_lock(), + */ + __pyx_t_2[0] = PyThread_allocate_lock(); + __pyx_t_2[1] = PyThread_allocate_lock(); + __pyx_t_2[2] = PyThread_allocate_lock(); + __pyx_t_2[3] = PyThread_allocate_lock(); + __pyx_t_2[4] = PyThread_allocate_lock(); + __pyx_t_2[5] = PyThread_allocate_lock(); + __pyx_t_2[6] = PyThread_allocate_lock(); + __pyx_t_2[7] = PyThread_allocate_lock(); + memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_2, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); + + /* "View.MemoryView":549 + * info.obj = self + * + * __pyx_getbuffer = capsule( &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 549, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 549, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + PyType_Modified(__pyx_memoryview_type); + + /* "View.MemoryView":995 + * return self.from_object + * + * __pyx_getbuffer = capsule( &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 995, __pyx_L1_error) + 
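+ /* This is the third registration of a getbuffer capsule: array, memoryview,
+  * and _memoryviewslice each store their __getbuffer__ implementation under
+  * "__pyx_getbuffer" in the type dict, which is how these types export the
+  * PEP 3118 buffer protocol; the PyType_Modified() call after each insert
+  * invalidates the type's attribute cache so the new entry is seen. */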
__Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 995, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + PyType_Modified(__pyx_memoryviewslice_type); + + /* "(tree fragment)":1 + * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< + * cdef object __pyx_PickleError + * cdef object __pyx_result + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "(tree fragment)":11 + * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) + * return __pyx_result + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< + * __pyx_result.name = __pyx_state[0] + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): + */ + + /*--- Wrapped vars code ---*/ + + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + if (__pyx_m) { + if (__pyx_d) { + __Pyx_AddTraceback("init monotonic_align.core", __pyx_clineno, __pyx_lineno, __pyx_filename); + } + Py_CLEAR(__pyx_m); + } else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_ImportError, "init monotonic_align.core"); + } + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + #if CYTHON_PEP489_MULTI_PHASE_INIT + return (__pyx_m != NULL) ? 0 : -1; + #elif PY_MAJOR_VERSION >= 3 + return __pyx_m; + #else + return; + #endif +} + +/* --- Runtime support code --- */ +/* Refnanny */ +#if CYTHON_REFNANNY +static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { + PyObject *m = NULL, *p = NULL; + void *r = NULL; + m = PyImport_ImportModule(modname); + if (!m) goto end; + p = PyObject_GetAttrString(m, "RefNannyAPI"); + if (!p) goto end; + r = PyLong_AsVoidPtr(p); +end: + Py_XDECREF(p); + Py_XDECREF(m); + return (__Pyx_RefNannyAPIStruct *)r; +} +#endif + +/* PyObjectGetAttrStr */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro)) + return tp->tp_getattro(obj, attr_name); +#if PY_MAJOR_VERSION < 3 + if (likely(tp->tp_getattr)) + return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); +#endif + return PyObject_GetAttr(obj, attr_name); +} +#endif + +/* GetBuiltinName */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name) { + PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); + if (unlikely(!result)) { + PyErr_Format(PyExc_NameError, +#if PY_MAJOR_VERSION >= 3 + "name '%U' is not defined", name); +#else + "name '%.200s' is not defined", PyString_AS_STRING(name)); +#endif + } + return result; +} + +/* MemviewSliceInit */ +static int +__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, + int ndim, + __Pyx_memviewslice *memviewslice, + int memview_is_new_reference) +{ + __Pyx_RefNannyDeclarations + int i, retval=-1; + Py_buffer *buf = &memview->view; + __Pyx_RefNannySetupContext("init_memviewslice", 0); + if (unlikely(memviewslice->memview || memviewslice->data)) { + PyErr_SetString(PyExc_ValueError, + "memviewslice is already initialized!"); + goto fail; + } + if (buf->strides) { + for (i = 0; i < ndim; i++) { + memviewslice->strides[i] = buf->strides[i]; + } + } else { + Py_ssize_t stride = 
buf->itemsize; + for (i = ndim - 1; i >= 0; i--) { + memviewslice->strides[i] = stride; + stride *= buf->shape[i]; + } + } + for (i = 0; i < ndim; i++) { + memviewslice->shape[i] = buf->shape[i]; + if (buf->suboffsets) { + memviewslice->suboffsets[i] = buf->suboffsets[i]; + } else { + memviewslice->suboffsets[i] = -1; + } + } + memviewslice->memview = memview; + memviewslice->data = (char *)buf->buf; + if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { + Py_INCREF(memview); + } + retval = 0; + goto no_fail; +fail: + memviewslice->memview = 0; + memviewslice->data = 0; + retval = -1; +no_fail: + __Pyx_RefNannyFinishContext(); + return retval; +} +#ifndef Py_NO_RETURN +#define Py_NO_RETURN +#endif +static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN { + va_list vargs; + char msg[200]; +#ifdef HAVE_STDARG_PROTOTYPES + va_start(vargs, fmt); +#else + va_start(vargs); +#endif + vsnprintf(msg, 200, fmt, vargs); + va_end(vargs); + Py_FatalError(msg); +} +static CYTHON_INLINE int +__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, + PyThread_type_lock lock) +{ + int result; + PyThread_acquire_lock(lock, 1); + result = (*acquisition_count)++; + PyThread_release_lock(lock); + return result; +} +static CYTHON_INLINE int +__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, + PyThread_type_lock lock) +{ + int result; + PyThread_acquire_lock(lock, 1); + result = (*acquisition_count)--; + PyThread_release_lock(lock); + return result; +} +static CYTHON_INLINE void +__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) +{ + int first_time; + struct __pyx_memoryview_obj *memview = memslice->memview; + if (unlikely(!memview || (PyObject *) memview == Py_None)) + return; + if (unlikely(__pyx_get_slice_count(memview) < 0)) + __pyx_fatalerror("Acquisition count is %d (line %d)", + __pyx_get_slice_count(memview), lineno); + first_time = __pyx_add_acquisition_count(memview) == 0; + if (unlikely(first_time)) { + if (have_gil) { + Py_INCREF((PyObject *) memview); + } else { + PyGILState_STATE _gilstate = PyGILState_Ensure(); + Py_INCREF((PyObject *) memview); + PyGILState_Release(_gilstate); + } + } +} +static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, + int have_gil, int lineno) { + int last_time; + struct __pyx_memoryview_obj *memview = memslice->memview; + if (unlikely(!memview || (PyObject *) memview == Py_None)) { + memslice->memview = NULL; + return; + } + if (unlikely(__pyx_get_slice_count(memview) <= 0)) + __pyx_fatalerror("Acquisition count is %d (line %d)", + __pyx_get_slice_count(memview), lineno); + last_time = __pyx_sub_acquisition_count(memview) == 1; + memslice->data = NULL; + if (unlikely(last_time)) { + if (have_gil) { + Py_CLEAR(memslice->memview); + } else { + PyGILState_STATE _gilstate = PyGILState_Ensure(); + Py_CLEAR(memslice->memview); + PyGILState_Release(_gilstate); + } + } else { + memslice->memview = NULL; + } +} + +/* RaiseArgTupleInvalid */ +static void __Pyx_RaiseArgtupleInvalid( + const char* func_name, + int exact, + Py_ssize_t num_min, + Py_ssize_t num_max, + Py_ssize_t num_found) +{ + Py_ssize_t num_expected; + const char *more_or_less; + if (num_found < num_min) { + num_expected = num_min; + more_or_less = "at least"; + } else { + num_expected = num_max; + more_or_less = "at most"; + } + if (exact) { + more_or_less = "exactly"; + } + PyErr_Format(PyExc_TypeError, + "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" 
CYTHON_FORMAT_SSIZE_T "d given)", + func_name, more_or_less, num_expected, + (num_expected == 1) ? "" : "s", num_found); +} + +/* RaiseDoubleKeywords */ +static void __Pyx_RaiseDoubleKeywordsError( + const char* func_name, + PyObject* kw_name) +{ + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION >= 3 + "%s() got multiple values for keyword argument '%U'", func_name, kw_name); + #else + "%s() got multiple values for keyword argument '%s'", func_name, + PyString_AsString(kw_name)); + #endif +} + +/* ParseKeywords */ +static int __Pyx_ParseOptionalKeywords( + PyObject *kwds, + PyObject **argnames[], + PyObject *kwds2, + PyObject *values[], + Py_ssize_t num_pos_args, + const char* function_name) +{ + PyObject *key = 0, *value = 0; + Py_ssize_t pos = 0; + PyObject*** name; + PyObject*** first_kw_arg = argnames + num_pos_args; + while (PyDict_Next(kwds, &pos, &key, &value)) { + name = first_kw_arg; + while (*name && (**name != key)) name++; + if (*name) { + values[name-argnames] = value; + continue; + } + name = first_kw_arg; + #if PY_MAJOR_VERSION < 3 + if (likely(PyString_Check(key))) { + while (*name) { + if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) + && _PyString_Eq(**name, key)) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + if ((**argname == key) || ( + (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) + && _PyString_Eq(**argname, key))) { + goto arg_passed_twice; + } + argname++; + } + } + } else + #endif + if (likely(PyUnicode_Check(key))) { + while (*name) { + int cmp = (**name == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : + #endif + PyUnicode_Compare(**name, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + int cmp = (**argname == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : + #endif + PyUnicode_Compare(**argname, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) goto arg_passed_twice; + argname++; + } + } + } else + goto invalid_keyword_type; + if (kwds2) { + if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; + } else { + goto invalid_keyword; + } + } + return 0; +arg_passed_twice: + __Pyx_RaiseDoubleKeywordsError(function_name, key); + goto bad; +invalid_keyword_type: + PyErr_Format(PyExc_TypeError, + "%.200s() keywords must be strings", function_name); + goto bad; +invalid_keyword: + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION < 3 + "%.200s() got an unexpected keyword argument '%.200s'", + function_name, PyString_AsString(key)); + #else + "%s() got an unexpected keyword argument '%U'", + function_name, key); + #endif +bad: + return -1; +} + +/* None */ +static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { + PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); +} + +/* ArgTypeTest */ +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) +{ + if (unlikely(!type)) { + PyErr_SetString(PyExc_SystemError, "Missing type object"); + return 0; + } + else if (exact) { + #if PY_MAJOR_VERSION == 2 + if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; + #endif + } + else { + if (likely(__Pyx_TypeCheck(obj, type))) return 1; + } + PyErr_Format(PyExc_TypeError, + "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", + name, type->tp_name, Py_TYPE(obj)->tp_name); + return 0; +} + +/* PyObjectCall */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { + PyObject *result; + ternaryfunc call = func->ob_type->tp_call; + if (unlikely(!call)) + return PyObject_Call(func, arg, kw); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + result = (*call)(func, arg, kw); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyErrFetchRestore */ +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + tmp_type = tstate->curexc_type; + tmp_value = tstate->curexc_value; + tmp_tb = tstate->curexc_traceback; + tstate->curexc_type = type; + tstate->curexc_value = value; + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +} +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + *type = tstate->curexc_type; + *value = tstate->curexc_value; + *tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +} +#endif + +/* RaiseException */ +#if PY_MAJOR_VERSION < 3 +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, + CYTHON_UNUSED PyObject *cause) { + __Pyx_PyThreadState_declare + Py_XINCREF(type); + if (!value || value == Py_None) + value = NULL; + else + Py_INCREF(value); + if (!tb || tb == Py_None) + tb = NULL; + else { + Py_INCREF(tb); + if (!PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto raise_error; 
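+ /* In this Python 2 variant of __Pyx_Raise the (type, value, tb) triple is
+  * merely normalized (classic instances validated, the traceback
+  * type-checked) and handed to __Pyx_ErrRestore; the PY_MAJOR_VERSION >= 3
+  * variant further below instead instantiates the exception eagerly and
+  * attaches __cause__ via PyException_SetCause(). */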
+ } + } + if (PyType_Check(type)) { +#if CYTHON_COMPILING_IN_PYPY + if (!value) { + Py_INCREF(Py_None); + value = Py_None; + } +#endif + PyErr_NormalizeException(&type, &value, &tb); + } else { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto raise_error; + } + value = type; + type = (PyObject*) Py_TYPE(type); + Py_INCREF(type); + if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto raise_error; + } + } + __Pyx_PyThreadState_assign + __Pyx_ErrRestore(type, value, tb); + return; +raise_error: + Py_XDECREF(value); + Py_XDECREF(type); + Py_XDECREF(tb); + return; +} +#else +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { + PyObject* owned_instance = NULL; + if (tb == Py_None) { + tb = 0; + } else if (tb && !PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto bad; + } + if (value == Py_None) + value = 0; + if (PyExceptionInstance_Check(type)) { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto bad; + } + value = type; + type = (PyObject*) Py_TYPE(value); + } else if (PyExceptionClass_Check(type)) { + PyObject *instance_class = NULL; + if (value && PyExceptionInstance_Check(value)) { + instance_class = (PyObject*) Py_TYPE(value); + if (instance_class != type) { + int is_subclass = PyObject_IsSubclass(instance_class, type); + if (!is_subclass) { + instance_class = NULL; + } else if (unlikely(is_subclass == -1)) { + goto bad; + } else { + type = instance_class; + } + } + } + if (!instance_class) { + PyObject *args; + if (!value) + args = PyTuple_New(0); + else if (PyTuple_Check(value)) { + Py_INCREF(value); + args = value; + } else + args = PyTuple_Pack(1, value); + if (!args) + goto bad; + owned_instance = PyObject_Call(type, args, NULL); + Py_DECREF(args); + if (!owned_instance) + goto bad; + value = owned_instance; + if (!PyExceptionInstance_Check(value)) { + PyErr_Format(PyExc_TypeError, + "calling %R should have returned an instance of " + "BaseException, not %R", + type, Py_TYPE(value)); + goto bad; + } + } + } else { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto bad; + } + if (cause) { + PyObject *fixed_cause; + if (cause == Py_None) { + fixed_cause = NULL; + } else if (PyExceptionClass_Check(cause)) { + fixed_cause = PyObject_CallObject(cause, NULL); + if (fixed_cause == NULL) + goto bad; + } else if (PyExceptionInstance_Check(cause)) { + fixed_cause = cause; + Py_INCREF(fixed_cause); + } else { + PyErr_SetString(PyExc_TypeError, + "exception causes must derive from " + "BaseException"); + goto bad; + } + PyException_SetCause(value, fixed_cause); + } + PyErr_SetObject(type, value); + if (tb) { +#if CYTHON_COMPILING_IN_PYPY + PyObject *tmp_type, *tmp_value, *tmp_tb; + PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); + Py_INCREF(tb); + PyErr_Restore(tmp_type, tmp_value, tb); + Py_XDECREF(tmp_tb); +#else + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject* tmp_tb = tstate->curexc_traceback; + if (tb != tmp_tb) { + Py_INCREF(tb); + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_tb); + } +#endif + } +bad: + Py_XDECREF(owned_instance); + return; +} +#endif + +/* PyCFunctionFastCall */ +#if CYTHON_FAST_PYCCALL +static CYTHON_INLINE PyObject * 
__Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { + PyCFunctionObject *func = (PyCFunctionObject*)func_obj; + PyCFunction meth = PyCFunction_GET_FUNCTION(func); + PyObject *self = PyCFunction_GET_SELF(func); + int flags = PyCFunction_GET_FLAGS(func); + assert(PyCFunction_Check(func)); + assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); + assert(nargs >= 0); + assert(nargs == 0 || args != NULL); + /* _PyCFunction_FastCallDict() must not be called with an exception set, + because it may clear it (directly or indirectly) and so the + caller loses its exception */ + assert(!PyErr_Occurred()); + if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { + return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); + } else { + return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); + } +} +#endif + +/* PyFunctionFastCall */ +#if CYTHON_FAST_PYCALL +static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, + PyObject *globals) { + PyFrameObject *f; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject **fastlocals; + Py_ssize_t i; + PyObject *result; + assert(globals != NULL); + /* XXX Perhaps we should create a specialized + PyFrame_New() that doesn't take locals, but does + take builtins without sanity checking them. + */ + assert(tstate != NULL); + f = PyFrame_New(tstate, co, globals, NULL); + if (f == NULL) { + return NULL; + } + fastlocals = __Pyx_PyFrame_GetLocalsplus(f); + for (i = 0; i < na; i++) { + Py_INCREF(*args); + fastlocals[i] = *args++; + } + result = PyEval_EvalFrameEx(f,0); + ++tstate->recursion_depth; + Py_DECREF(f); + --tstate->recursion_depth; + return result; +} +#if 1 || PY_VERSION_HEX < 0x030600B1 +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { + PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); + PyObject *globals = PyFunction_GET_GLOBALS(func); + PyObject *argdefs = PyFunction_GET_DEFAULTS(func); + PyObject *closure; +#if PY_MAJOR_VERSION >= 3 + PyObject *kwdefs; +#endif + PyObject *kwtuple, **k; + PyObject **d; + Py_ssize_t nd; + Py_ssize_t nk; + PyObject *result; + assert(kwargs == NULL || PyDict_Check(kwargs)); + nk = kwargs ? 
PyDict_Size(kwargs) : 0; + if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { + return NULL; + } + if ( +#if PY_MAJOR_VERSION >= 3 + co->co_kwonlyargcount == 0 && +#endif + likely(kwargs == NULL || nk == 0) && + co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { + if (argdefs == NULL && co->co_argcount == nargs) { + result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); + goto done; + } + else if (nargs == 0 && argdefs != NULL + && co->co_argcount == Py_SIZE(argdefs)) { + /* function called with no arguments, but all parameters have + a default value: use default values as arguments .*/ + args = &PyTuple_GET_ITEM(argdefs, 0); + result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); + goto done; + } + } + if (kwargs != NULL) { + Py_ssize_t pos, i; + kwtuple = PyTuple_New(2 * nk); + if (kwtuple == NULL) { + result = NULL; + goto done; + } + k = &PyTuple_GET_ITEM(kwtuple, 0); + pos = i = 0; + while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { + Py_INCREF(k[i]); + Py_INCREF(k[i+1]); + i += 2; + } + nk = i / 2; + } + else { + kwtuple = NULL; + k = NULL; + } + closure = PyFunction_GET_CLOSURE(func); +#if PY_MAJOR_VERSION >= 3 + kwdefs = PyFunction_GET_KW_DEFAULTS(func); +#endif + if (argdefs != NULL) { + d = &PyTuple_GET_ITEM(argdefs, 0); + nd = Py_SIZE(argdefs); + } + else { + d = NULL; + nd = 0; + } +#if PY_MAJOR_VERSION >= 3 + result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, + args, (int)nargs, + k, (int)nk, + d, (int)nd, kwdefs, closure); +#else + result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, + args, (int)nargs, + k, (int)nk, + d, (int)nd, closure); +#endif + Py_XDECREF(kwtuple); +done: + Py_LeaveRecursiveCall(); + return result; +} +#endif +#endif + +/* PyObjectCall2Args */ +static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { + PyObject *args, *result = NULL; + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(function)) { + PyObject *args[2] = {arg1, arg2}; + return __Pyx_PyFunction_FastCall(function, args, 2); + } + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(function)) { + PyObject *args[2] = {arg1, arg2}; + return __Pyx_PyCFunction_FastCall(function, args, 2); + } + #endif + args = PyTuple_New(2); + if (unlikely(!args)) goto done; + Py_INCREF(arg1); + PyTuple_SET_ITEM(args, 0, arg1); + Py_INCREF(arg2); + PyTuple_SET_ITEM(args, 1, arg2); + Py_INCREF(function); + result = __Pyx_PyObject_Call(function, args, NULL); + Py_DECREF(args); + Py_DECREF(function); +done: + return result; +} + +/* PyObjectCallMethO */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { + PyObject *self, *result; + PyCFunction cfunc; + cfunc = PyCFunction_GET_FUNCTION(func); + self = PyCFunction_GET_SELF(func); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + result = cfunc(self, arg); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyObjectCallOneArg */ +#if CYTHON_COMPILING_IN_CPYTHON +static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { + PyObject *result; + PyObject *args = PyTuple_New(1); + if (unlikely(!args)) return NULL; + Py_INCREF(arg); + PyTuple_SET_ITEM(args, 0, arg); + result = __Pyx_PyObject_Call(func, args, NULL); + Py_DECREF(args); 
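+ /* Slow path of the one-argument call helper: the dispatcher defined next
+  * tries the cheap entry points first (PyFunction frame fast call, METH_O,
+  * METH_FASTCALL) and only falls back to packing the single argument into a
+  * 1-tuple as done here; in Python terms every branch is just func(arg). */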
+ return result; +} +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { +#if CYTHON_FAST_PYCALL + if (PyFunction_Check(func)) { + return __Pyx_PyFunction_FastCall(func, &arg, 1); + } +#endif + if (likely(PyCFunction_Check(func))) { + if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { + return __Pyx_PyObject_CallMethO(func, arg); +#if CYTHON_FAST_PYCCALL + } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { + return __Pyx_PyCFunction_FastCall(func, &arg, 1); +#endif + } + } + return __Pyx__PyObject_CallOneArg(func, arg); +} +#else +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { + PyObject *result; + PyObject *args = PyTuple_Pack(1, arg); + if (unlikely(!args)) return NULL; + result = __Pyx_PyObject_Call(func, args, NULL); + Py_DECREF(args); + return result; +} +#endif + +/* BytesEquals */ +static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { +#if CYTHON_COMPILING_IN_PYPY + return PyObject_RichCompareBool(s1, s2, equals); +#else + if (s1 == s2) { + return (equals == Py_EQ); + } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { + const char *ps1, *ps2; + Py_ssize_t length = PyBytes_GET_SIZE(s1); + if (length != PyBytes_GET_SIZE(s2)) + return (equals == Py_NE); + ps1 = PyBytes_AS_STRING(s1); + ps2 = PyBytes_AS_STRING(s2); + if (ps1[0] != ps2[0]) { + return (equals == Py_NE); + } else if (length == 1) { + return (equals == Py_EQ); + } else { + int result; +#if CYTHON_USE_UNICODE_INTERNALS + Py_hash_t hash1, hash2; + hash1 = ((PyBytesObject*)s1)->ob_shash; + hash2 = ((PyBytesObject*)s2)->ob_shash; + if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { + return (equals == Py_NE); + } +#endif + result = memcmp(ps1, ps2, (size_t)length); + return (equals == Py_EQ) ? 
(result == 0) : (result != 0); + } + } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { + return (equals == Py_NE); + } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { + return (equals == Py_NE); + } else { + int result; + PyObject* py_result = PyObject_RichCompare(s1, s2, equals); + if (!py_result) + return -1; + result = __Pyx_PyObject_IsTrue(py_result); + Py_DECREF(py_result); + return result; + } +#endif +} + +/* UnicodeEquals */ +static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { +#if CYTHON_COMPILING_IN_PYPY + return PyObject_RichCompareBool(s1, s2, equals); +#else +#if PY_MAJOR_VERSION < 3 + PyObject* owned_ref = NULL; +#endif + int s1_is_unicode, s2_is_unicode; + if (s1 == s2) { + goto return_eq; + } + s1_is_unicode = PyUnicode_CheckExact(s1); + s2_is_unicode = PyUnicode_CheckExact(s2); +#if PY_MAJOR_VERSION < 3 + if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { + owned_ref = PyUnicode_FromObject(s2); + if (unlikely(!owned_ref)) + return -1; + s2 = owned_ref; + s2_is_unicode = 1; + } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { + owned_ref = PyUnicode_FromObject(s1); + if (unlikely(!owned_ref)) + return -1; + s1 = owned_ref; + s1_is_unicode = 1; + } else if (((!s2_is_unicode) & (!s1_is_unicode))) { + return __Pyx_PyBytes_Equals(s1, s2, equals); + } +#endif + if (s1_is_unicode & s2_is_unicode) { + Py_ssize_t length; + int kind; + void *data1, *data2; + if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) + return -1; + length = __Pyx_PyUnicode_GET_LENGTH(s1); + if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { + goto return_ne; + } +#if CYTHON_USE_UNICODE_INTERNALS + { + Py_hash_t hash1, hash2; + #if CYTHON_PEP393_ENABLED + hash1 = ((PyASCIIObject*)s1)->hash; + hash2 = ((PyASCIIObject*)s2)->hash; + #else + hash1 = ((PyUnicodeObject*)s1)->hash; + hash2 = ((PyUnicodeObject*)s2)->hash; + #endif + if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { + goto return_ne; + } + } +#endif + kind = __Pyx_PyUnicode_KIND(s1); + if (kind != __Pyx_PyUnicode_KIND(s2)) { + goto return_ne; + } + data1 = __Pyx_PyUnicode_DATA(s1); + data2 = __Pyx_PyUnicode_DATA(s2); + if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { + goto return_ne; + } else if (length == 1) { + goto return_eq; + } else { + int result = memcmp(data1, data2, (size_t)(length * kind)); + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + return (equals == Py_EQ) ? 
(result == 0) : (result != 0); + } + } else if ((s1 == Py_None) & s2_is_unicode) { + goto return_ne; + } else if ((s2 == Py_None) & s1_is_unicode) { + goto return_ne; + } else { + int result; + PyObject* py_result = PyObject_RichCompare(s1, s2, equals); + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + if (!py_result) + return -1; + result = __Pyx_PyObject_IsTrue(py_result); + Py_DECREF(py_result); + return result; + } +return_eq: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + return (equals == Py_EQ); +return_ne: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + return (equals == Py_NE); +#endif +} + +/* None */ +static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) { + Py_ssize_t q = a / b; + Py_ssize_t r = a - q*b; + q -= ((r != 0) & ((r ^ b) < 0)); + return q; +} + +/* GetAttr */ +static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { +#if CYTHON_USE_TYPE_SLOTS +#if PY_MAJOR_VERSION >= 3 + if (likely(PyUnicode_Check(n))) +#else + if (likely(PyString_Check(n))) +#endif + return __Pyx_PyObject_GetAttrStr(o, n); +#endif + return PyObject_GetAttr(o, n); +} + +/* GetItemInt */ +static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { + PyObject *r; + if (!j) return NULL; + r = PyObject_GetItem(o, j); + Py_DECREF(j); + return r; +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + Py_ssize_t wrapped_i = i; + if (wraparound & unlikely(i < 0)) { + wrapped_i += PyList_GET_SIZE(o); + } + if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { + PyObject *r = PyList_GET_ITEM(o, wrapped_i); + Py_INCREF(r); + return r; + } + return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); +#else + return PySequence_GetItem(o, i); +#endif +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + Py_ssize_t wrapped_i = i; + if (wraparound & unlikely(i < 0)) { + wrapped_i += PyTuple_GET_SIZE(o); + } + if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { + PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); + Py_INCREF(r); + return r; + } + return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); +#else + return PySequence_GetItem(o, i); +#endif +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS + if (is_list || PyList_CheckExact(o)) { + Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); + if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { + PyObject *r = PyList_GET_ITEM(o, n); + Py_INCREF(r); + return r; + } + } + else if (PyTuple_CheckExact(o)) { + Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o);
+ if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) {
+ PyObject *r = PyTuple_GET_ITEM(o, n);
+ Py_INCREF(r);
+ return r;
+ }
+ } else {
+ PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
+ if (likely(m && m->sq_item)) {
+ if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
+ Py_ssize_t l = m->sq_length(o);
+ if (likely(l >= 0)) {
+ i += l;
+ } else {
+ if (!PyErr_ExceptionMatches(PyExc_OverflowError))
+ return NULL;
+ PyErr_Clear();
+ }
+ }
+ return m->sq_item(o, i);
+ }
+ }
+#else
+ if (is_list || PySequence_Check(o)) {
+ return PySequence_GetItem(o, i);
+ }
+#endif
+ return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+}
+
+/* ObjectGetItem */
+#if CYTHON_USE_TYPE_SLOTS
+static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) {
+ PyObject *runerr;
+ Py_ssize_t key_value;
+ PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence;
+ if (unlikely(!(m && m->sq_item))) {
+ PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name);
+ return NULL;
+ }
+ key_value = __Pyx_PyIndex_AsSsize_t(index);
+ if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) {
+ return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1);
+ }
+ if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) {
+ PyErr_Clear();
+ PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name);
+ }
+ return NULL;
+}
+static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) {
+ PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping;
+ if (likely(m && m->mp_subscript)) {
+ return m->mp_subscript(obj, key);
+ }
+ return __Pyx_PyObject_GetIndex(obj, key);
+}
+#endif
+
+/* decode_c_string */
+static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
+ const char* cstring, Py_ssize_t start, Py_ssize_t stop,
+ const char* encoding, const char* errors,
+ PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
+ Py_ssize_t length;
+ if (unlikely((start < 0) | (stop < 0))) {
+ size_t slen = strlen(cstring);
+ if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "c-string too long to convert to Python");
+ return NULL;
+ }
+ length = (Py_ssize_t) slen;
+ if (start < 0) {
+ start += length;
+ if (start < 0)
+ start = 0;
+ }
+ if (stop < 0)
+ stop += length;
+ }
+ if (unlikely(stop <= start))
+ return __Pyx_NewRef(__pyx_empty_unicode);
+ length = stop - start;
+ cstring += start;
+ if (decode_func) {
+ return decode_func(cstring, length, errors);
+ } else {
+ return PyUnicode_Decode(cstring, length, encoding, errors);
+ }
+}
+
+/* PyErrExceptionMatches */
+#if CYTHON_FAST_THREAD_STATE
+static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
+ Py_ssize_t i, n;
+ n = PyTuple_GET_SIZE(tuple);
+#if PY_MAJOR_VERSION >= 3
+ for (i=0; i<n; i++) {
+ if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
+ }
+#endif
+ for (i=0; i<n; i++) {
+ if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
+ }
+ return 0;
+}
+static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
+ PyObject *exc_type = tstate->curexc_type;
+ if (exc_type == err) return 1;
+ if (unlikely(!exc_type)) return 0;
+ if (unlikely(PyTuple_Check(err)))
+ return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
+ return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
+}
+#endif
+
+/* GetAttr3 */
+static PyObject *__Pyx_GetAttr3Default(PyObject *d) {
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
+ return NULL;
+ __Pyx_PyErr_Clear();
+ Py_INCREF(d);
+ return d;
+}
+static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) {
+ PyObject *r = __Pyx_GetAttr(o, n);
+ return (likely(r)) ? 
r : __Pyx_GetAttr3Default(d); +} + +/* PyDictVersioning */ +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; +} +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { + PyObject **dictptr = NULL; + Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; + if (offset) { +#if CYTHON_COMPILING_IN_CPYTHON + dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); +#else + dictptr = _PyObject_GetDictPtr(obj); +#endif + } + return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; +} +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) + return 0; + return obj_dict_version == __Pyx_get_object_dict_version(obj); +} +#endif + +/* GetModuleGlobalName */ +#if CYTHON_USE_DICT_VERSIONS +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) +#else +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) +#endif +{ + PyObject *result; +#if !CYTHON_AVOID_BORROWED_REFS +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 + result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } else if (unlikely(PyErr_Occurred())) { + return NULL; + } +#else + result = PyDict_GetItem(__pyx_d, name); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } +#endif +#else + result = PyObject_GetItem(__pyx_d, name); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } + PyErr_Clear(); +#endif + return __Pyx_GetBuiltinName(name); +} + +/* RaiseTooManyValuesToUnpack */ +static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { + PyErr_Format(PyExc_ValueError, + "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); +} + +/* RaiseNeedMoreValuesToUnpack */ +static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { + PyErr_Format(PyExc_ValueError, + "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", + index, (index == 1) ? 
"" : "s"); +} + +/* RaiseNoneIterError */ +static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); +} + +/* ExtTypeTest */ +static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { + if (unlikely(!type)) { + PyErr_SetString(PyExc_SystemError, "Missing type object"); + return 0; + } + if (likely(__Pyx_TypeCheck(obj, type))) + return 1; + PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", + Py_TYPE(obj)->tp_name, type->tp_name); + return 0; +} + +/* GetTopmostException */ +#if CYTHON_USE_EXC_INFO_STACK +static _PyErr_StackItem * +__Pyx_PyErr_GetTopmostException(PyThreadState *tstate) +{ + _PyErr_StackItem *exc_info = tstate->exc_info; + while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && + exc_info->previous_item != NULL) + { + exc_info = exc_info->previous_item; + } + return exc_info; +} +#endif + +/* SaveResetException */ +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); + *type = exc_info->exc_type; + *value = exc_info->exc_value; + *tb = exc_info->exc_traceback; + #else + *type = tstate->exc_type; + *value = tstate->exc_value; + *tb = tstate->exc_traceback; + #endif + Py_XINCREF(*type); + Py_XINCREF(*value); + Py_XINCREF(*tb); +} +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type = type; + exc_info->exc_value = value; + exc_info->exc_traceback = tb; + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = type; + tstate->exc_value = value; + tstate->exc_traceback = tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +} +#endif + +/* GetException */ +#if CYTHON_FAST_THREAD_STATE +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) +#endif +{ + PyObject *local_type, *local_value, *local_tb; +#if CYTHON_FAST_THREAD_STATE + PyObject *tmp_type, *tmp_value, *tmp_tb; + local_type = tstate->curexc_type; + local_value = tstate->curexc_value; + local_tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +#else + PyErr_Fetch(&local_type, &local_value, &local_tb); +#endif + PyErr_NormalizeException(&local_type, &local_value, &local_tb); +#if CYTHON_FAST_THREAD_STATE + if (unlikely(tstate->curexc_type)) +#else + if (unlikely(PyErr_Occurred())) +#endif + goto bad; + #if PY_MAJOR_VERSION >= 3 + if (local_tb) { + if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) + goto bad; + } + #endif + Py_XINCREF(local_tb); + Py_XINCREF(local_type); + Py_XINCREF(local_value); + *type = local_type; + *value = local_value; + *tb = local_tb; +#if CYTHON_FAST_THREAD_STATE + #if CYTHON_USE_EXC_INFO_STACK + { + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = 
exc_info->exc_traceback; + exc_info->exc_type = local_type; + exc_info->exc_value = local_value; + exc_info->exc_traceback = local_tb; + } + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = local_type; + tstate->exc_value = local_value; + tstate->exc_traceback = local_tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +#else + PyErr_SetExcInfo(local_type, local_value, local_tb); +#endif + return 0; +bad: + *type = 0; + *value = 0; + *tb = 0; + Py_XDECREF(local_type); + Py_XDECREF(local_value); + Py_XDECREF(local_tb); + return -1; +} + +/* SwapException */ +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type = *type; + exc_info->exc_value = *value; + exc_info->exc_traceback = *tb; + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = *type; + tstate->exc_value = *value; + tstate->exc_traceback = *tb; + #endif + *type = tmp_type; + *value = tmp_value; + *tb = tmp_tb; +} +#else +static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); + PyErr_SetExcInfo(*type, *value, *tb); + *type = tmp_type; + *value = tmp_value; + *tb = tmp_tb; +} +#endif + +/* Import */ +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { + PyObject *empty_list = 0; + PyObject *module = 0; + PyObject *global_dict = 0; + PyObject *empty_dict = 0; + PyObject *list; + #if PY_MAJOR_VERSION < 3 + PyObject *py_import; + py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); + if (!py_import) + goto bad; + #endif + if (from_list) + list = from_list; + else { + empty_list = PyList_New(0); + if (!empty_list) + goto bad; + list = empty_list; + } + global_dict = PyModule_GetDict(__pyx_m); + if (!global_dict) + goto bad; + empty_dict = PyDict_New(); + if (!empty_dict) + goto bad; + { + #if PY_MAJOR_VERSION >= 3 + if (level == -1) { + if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, 1); + if (!module) { + if (!PyErr_ExceptionMatches(PyExc_ImportError)) + goto bad; + PyErr_Clear(); + } + } + level = 0; + } + #endif + if (!module) { + #if PY_MAJOR_VERSION < 3 + PyObject *py_level = PyInt_FromLong(level); + if (!py_level) + goto bad; + module = PyObject_CallFunctionObjArgs(py_import, + name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); + Py_DECREF(py_level); + #else + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, level); + #endif + } + } +bad: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(py_import); + #endif + Py_XDECREF(empty_list); + Py_XDECREF(empty_dict); + return module; +} + +/* FastTypeChecks */ +#if CYTHON_COMPILING_IN_CPYTHON +static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { + while (a) { + a = a->tp_base; + if (a == b) + return 1; + } + return b == &PyBaseObject_Type; +} +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { + PyObject *mro; + if (a == b) return 1; + mro = 
a->tp_mro;
+ if (likely(mro)) {
+ Py_ssize_t i, n;
+ n = PyTuple_GET_SIZE(mro);
+ for (i = 0; i < n; i++) {
+ if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
+ return 1;
+ }
+ return 0;
+ }
+ return __Pyx_InBases(a, b);
+}
+#if PY_MAJOR_VERSION == 2
+static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
+ PyObject *exception, *value, *tb;
+ int res;
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ __Pyx_ErrFetch(&exception, &value, &tb);
+ res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
+ if (unlikely(res == -1)) {
+ PyErr_WriteUnraisable(err);
+ res = 0;
+ }
+ if (!res) {
+ res = PyObject_IsSubclass(err, exc_type2);
+ if (unlikely(res == -1)) {
+ PyErr_WriteUnraisable(err);
+ res = 0;
+ }
+ }
+ __Pyx_ErrRestore(exception, value, tb);
+ return res;
+}
+#else
+static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
+ int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
+ if (!res) {
+ res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
+ }
+ return res;
+}
+#endif
+static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
+ Py_ssize_t i, n;
+ assert(PyExceptionClass_Check(exc_type));
+ n = PyTuple_GET_SIZE(tuple);
+#if PY_MAJOR_VERSION >= 3
+ for (i=0; i<n; i++) {
+ if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
+ }
+#endif
+ for (i=0; i<n; i++) {
+ PyObject *t = PyTuple_GET_ITEM(tuple, i);
+ #if PY_MAJOR_VERSION < 3
+ if (likely(exc_type == t)) return 1;
+ #endif
+ if (likely(PyExceptionClass_Check(t))) {
+ if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
+ } else {
+ }
+ }
+ return 0;
+}
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
+ if (likely(err == exc_type)) return 1;
+ if (likely(PyExceptionClass_Check(err))) {
+ if (likely(PyExceptionClass_Check(exc_type))) {
+ return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
+ } else if (likely(PyTuple_Check(exc_type))) {
+ return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
+ } else {
+ }
+ }
+ return PyErr_GivenExceptionMatches(err, exc_type);
+}
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
+ assert(PyExceptionClass_Check(exc_type1));
+ assert(PyExceptionClass_Check(exc_type2));
+ if (likely(err == exc_type1 || err == exc_type2)) return 1;
+ if (likely(PyExceptionClass_Check(err))) {
+ return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
+ }
+ return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
+}
+#endif
+
+/* PyIntBinop */
+#if !CYTHON_COMPILING_IN_PYPY
+static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) {
+ (void)inplace;
+ (void)zerodivision_check;
+ #if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_CheckExact(op1))) {
+ const long b = intval;
+ long x;
+ long a = PyInt_AS_LONG(op1);
+ x = (long)((unsigned long)a + b);
+ if (likely((x^a) >= 0 || (x^b) >= 0))
+ return PyInt_FromLong(x);
+ return PyLong_Type.tp_as_number->nb_add(op1, op2);
+ }
+ #endif
+ #if CYTHON_USE_PYLONG_INTERNALS
+ if (likely(PyLong_CheckExact(op1))) {
+ const long b = intval;
+ long a, x;
+#ifdef HAVE_LONG_LONG
+ const PY_LONG_LONG llb = intval;
+ PY_LONG_LONG lla, llx;
+#endif
+ const digit* digits = ((PyLongObject*)op1)->ob_digit;
+ const Py_ssize_t size = Py_SIZE(op1);
+ if (likely(__Pyx_sst_abs(size) <= 1)) {
+ a = likely(size) ? 
digits[0] : 0; + if (size == -1) a = -a; + } else { + switch (size) { + case -2: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { + lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + case 2: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { + lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + case -3: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { + lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + case 3: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { + lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + case -4: + if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { + lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + case 4: + if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { + lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + default: return PyLong_Type.tp_as_number->nb_add(op1, op2); + } + } + x = a + b; + return PyLong_FromLong(x); +#ifdef HAVE_LONG_LONG + long_long: + llx = lla + llb; + return PyLong_FromLongLong(llx); +#endif + + + } + #endif + if (PyFloat_CheckExact(op1)) { + const long b = intval; + double a = PyFloat_AS_DOUBLE(op1); + double result; + PyFPE_START_PROTECT("add", return NULL) + 
result = ((double)a) + (double)b; + PyFPE_END_PROTECT(result) + return PyFloat_FromDouble(result); + } + return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); +} +#endif + +/* None */ +static CYTHON_INLINE long __Pyx_div_long(long a, long b) { + long q = a / b; + long r = a - q*b; + q -= ((r != 0) & ((r ^ b) < 0)); + return q; +} + +/* ImportFrom */ +static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { + PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); + if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Format(PyExc_ImportError, + #if PY_MAJOR_VERSION < 3 + "cannot import name %.230s", PyString_AS_STRING(name)); + #else + "cannot import name %S", name); + #endif + } + return value; +} + +/* HasAttr */ +static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { + PyObject *r; + if (unlikely(!__Pyx_PyBaseString_Check(n))) { + PyErr_SetString(PyExc_TypeError, + "hasattr(): attribute name must be string"); + return -1; + } + r = __Pyx_GetAttr(o, n); + if (unlikely(!r)) { + PyErr_Clear(); + return 0; + } else { + Py_DECREF(r); + return 1; + } +} + +/* PyObject_GenericGetAttrNoDict */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { + PyErr_Format(PyExc_AttributeError, +#if PY_MAJOR_VERSION >= 3 + "'%.50s' object has no attribute '%U'", + tp->tp_name, attr_name); +#else + "'%.50s' object has no attribute '%.400s'", + tp->tp_name, PyString_AS_STRING(attr_name)); +#endif + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { + PyObject *descr; + PyTypeObject *tp = Py_TYPE(obj); + if (unlikely(!PyString_Check(attr_name))) { + return PyObject_GenericGetAttr(obj, attr_name); + } + assert(!tp->tp_dictoffset); + descr = _PyType_Lookup(tp, attr_name); + if (unlikely(!descr)) { + return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); + } + Py_INCREF(descr); + #if PY_MAJOR_VERSION < 3 + if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) + #endif + { + descrgetfunc f = Py_TYPE(descr)->tp_descr_get; + if (unlikely(f)) { + PyObject *res = f(descr, obj, (PyObject *)tp); + Py_DECREF(descr); + return res; + } + } + return descr; +} +#endif + +/* PyObject_GenericGetAttr */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { + if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { + return PyObject_GenericGetAttr(obj, attr_name); + } + return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); +} +#endif + +/* SetVTable */ +static int __Pyx_SetVtable(PyObject *dict, void *vtable) { +#if PY_VERSION_HEX >= 0x02070000 + PyObject *ob = PyCapsule_New(vtable, 0, 0); +#else + PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); +#endif + if (!ob) + goto bad; + if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) + goto bad; + Py_DECREF(ob); + return 0; +bad: + Py_XDECREF(ob); + return -1; +} + +/* PyObjectGetAttrStrNoError */ +static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) + __Pyx_PyErr_Clear(); +} +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { + PyObject *result; +#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS 
&& PY_VERSION_HEX >= 0x030700B1 + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { + return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); + } +#endif + result = __Pyx_PyObject_GetAttrStr(obj, attr_name); + if (unlikely(!result)) { + __Pyx_PyObject_GetAttrStr_ClearAttributeError(); + } + return result; +} + +/* SetupReduce */ +static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { + int ret; + PyObject *name_attr; + name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2); + if (likely(name_attr)) { + ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); + } else { + ret = -1; + } + if (unlikely(ret < 0)) { + PyErr_Clear(); + ret = 0; + } + Py_XDECREF(name_attr); + return ret; +} +static int __Pyx_setup_reduce(PyObject* type_obj) { + int ret = 0; + PyObject *object_reduce = NULL; + PyObject *object_reduce_ex = NULL; + PyObject *reduce = NULL; + PyObject *reduce_ex = NULL; + PyObject *reduce_cython = NULL; + PyObject *setstate = NULL; + PyObject *setstate_cython = NULL; +#if CYTHON_USE_PYTYPE_LOOKUP + if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; +#else + if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; +#endif +#if CYTHON_USE_PYTYPE_LOOKUP + object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; +#else + object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; +#endif + reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD; + if (reduce_ex == object_reduce_ex) { +#if CYTHON_USE_PYTYPE_LOOKUP + object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; +#else + object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; +#endif + reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD; + if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { + reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython); + if (likely(reduce_cython)) { + ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; + ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; + } else if (reduce == object_reduce || PyErr_Occurred()) { + goto __PYX_BAD; + } + setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); + if (!setstate) PyErr_Clear(); + if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { + setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython); + if (likely(setstate_cython)) { + ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; + ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; + } else if (!setstate || PyErr_Occurred()) { + goto __PYX_BAD; + } + } + PyType_Modified((PyTypeObject*)type_obj); + } + } + goto __PYX_GOOD; +__PYX_BAD: + if (!PyErr_Occurred()) + PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); + ret = -1; +__PYX_GOOD: 
+#if !CYTHON_USE_PYTYPE_LOOKUP + Py_XDECREF(object_reduce); + Py_XDECREF(object_reduce_ex); +#endif + Py_XDECREF(reduce); + Py_XDECREF(reduce_ex); + Py_XDECREF(reduce_cython); + Py_XDECREF(setstate); + Py_XDECREF(setstate_cython); + return ret; +} + +/* CLineInTraceback */ +#ifndef CYTHON_CLINE_IN_TRACEBACK +static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { + PyObject *use_cline; + PyObject *ptype, *pvalue, *ptraceback; +#if CYTHON_COMPILING_IN_CPYTHON + PyObject **cython_runtime_dict; +#endif + if (unlikely(!__pyx_cython_runtime)) { + return c_line; + } + __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); +#if CYTHON_COMPILING_IN_CPYTHON + cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); + if (likely(cython_runtime_dict)) { + __PYX_PY_DICT_LOOKUP_IF_MODIFIED( + use_cline, *cython_runtime_dict, + __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) + } else +#endif + { + PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); + if (use_cline_obj) { + use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; + Py_DECREF(use_cline_obj); + } else { + PyErr_Clear(); + use_cline = NULL; + } + } + if (!use_cline) { + c_line = 0; + PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); + } + else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { + c_line = 0; + } + __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); + return c_line; +} +#endif + +/* CodeObjectCache */ +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { + int start = 0, mid = 0, end = count - 1; + if (end >= 0 && code_line > entries[end].code_line) { + return count; + } + while (start < end) { + mid = start + (end - start) / 2; + if (code_line < entries[mid].code_line) { + end = mid; + } else if (code_line > entries[mid].code_line) { + start = mid + 1; + } else { + return mid; + } + } + if (code_line <= entries[mid].code_line) { + return mid; + } else { + return mid + 1; + } +} +static PyCodeObject *__pyx_find_code_object(int code_line) { + PyCodeObject* code_object; + int pos; + if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { + return NULL; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { + return NULL; + } + code_object = __pyx_code_cache.entries[pos].code_object; + Py_INCREF(code_object); + return code_object; +} +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { + int pos, i; + __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; + if (unlikely(!code_line)) { + return; + } + if (unlikely(!entries)) { + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); + if (likely(entries)) { + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = 64; + __pyx_code_cache.count = 1; + entries[0].code_line = code_line; + entries[0].code_object = code_object; + Py_INCREF(code_object); + } + return; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { + PyCodeObject* tmp = entries[pos].code_object; + entries[pos].code_object = code_object; + Py_DECREF(tmp); + 
return; + } + if (__pyx_code_cache.count == __pyx_code_cache.max_count) { + int new_max = __pyx_code_cache.max_count + 64; + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( + __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); + if (unlikely(!entries)) { + return; + } + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = new_max; + } + for (i=__pyx_code_cache.count; i>pos; i--) { + entries[i] = entries[i-1]; + } + entries[pos].code_line = code_line; + entries[pos].code_object = code_object; + __pyx_code_cache.count++; + Py_INCREF(code_object); +} + +/* AddTraceback */ +#include "compile.h" +#include "frameobject.h" +#include "traceback.h" +static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( + const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyObject *py_srcfile = 0; + PyObject *py_funcname = 0; + #if PY_MAJOR_VERSION < 3 + py_srcfile = PyString_FromString(filename); + #else + py_srcfile = PyUnicode_FromString(filename); + #endif + if (!py_srcfile) goto bad; + if (c_line) { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #else + py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #endif + } + else { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromString(funcname); + #else + py_funcname = PyUnicode_FromString(funcname); + #endif + } + if (!py_funcname) goto bad; + py_code = __Pyx_PyCode_New( + 0, + 0, + 0, + 0, + 0, + __pyx_empty_bytes, /*PyObject *code,*/ + __pyx_empty_tuple, /*PyObject *consts,*/ + __pyx_empty_tuple, /*PyObject *names,*/ + __pyx_empty_tuple, /*PyObject *varnames,*/ + __pyx_empty_tuple, /*PyObject *freevars,*/ + __pyx_empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + py_line, + __pyx_empty_bytes /*PyObject *lnotab*/ + ); + Py_DECREF(py_srcfile); + Py_DECREF(py_funcname); + return py_code; +bad: + Py_XDECREF(py_srcfile); + Py_XDECREF(py_funcname); + return NULL; +} +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyFrameObject *py_frame = 0; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + if (c_line) { + c_line = __Pyx_CLineForTraceback(tstate, c_line); + } + py_code = __pyx_find_code_object(c_line ? -c_line : py_line); + if (!py_code) { + py_code = __Pyx_CreateCodeObjectForTraceback( + funcname, c_line, py_line, filename); + if (!py_code) goto bad; + __pyx_insert_code_object(c_line ? 
-c_line : py_line, py_code); + } + py_frame = PyFrame_New( + tstate, /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + __pyx_d, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + __Pyx_PyFrame_SetLineNumber(py_frame, py_line); + PyTraceBack_Here(py_frame); +bad: + Py_XDECREF(py_code); + Py_XDECREF(py_frame); +} + +#if PY_MAJOR_VERSION < 3 +static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { + if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); + if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); + if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); + PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); + return -1; +} +static void __Pyx_ReleaseBuffer(Py_buffer *view) { + PyObject *obj = view->obj; + if (!obj) return; + if (PyObject_CheckBuffer(obj)) { + PyBuffer_Release(view); + return; + } + if ((0)) {} + view->obj = NULL; + Py_DECREF(obj); +} +#endif + + +/* MemviewSliceIsContig */ +static int +__pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) +{ + int i, index, step, start; + Py_ssize_t itemsize = mvs.memview->view.itemsize; + if (order == 'F') { + step = 1; + start = 0; + } else { + step = -1; + start = ndim - 1; + } + for (i = 0; i < ndim; i++) { + index = start + step * i; + if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) + return 0; + itemsize *= mvs.shape[index]; + } + return 1; +} + +/* OverlappingSlices */ +static void +__pyx_get_array_memory_extents(__Pyx_memviewslice *slice, + void **out_start, void **out_end, + int ndim, size_t itemsize) +{ + char *start, *end; + int i; + start = end = slice->data; + for (i = 0; i < ndim; i++) { + Py_ssize_t stride = slice->strides[i]; + Py_ssize_t extent = slice->shape[i]; + if (extent == 0) { + *out_start = *out_end = start; + return; + } else { + if (stride > 0) + end += stride * (extent - 1); + else + start += stride * (extent - 1); + } + } + *out_start = start; + *out_end = end + itemsize; +} +static int +__pyx_slices_overlap(__Pyx_memviewslice *slice1, + __Pyx_memviewslice *slice2, + int ndim, size_t itemsize) +{ + void *start1, *end1, *start2, *end2; + __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); + __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); + return (start1 < end2) && (start2 < end1); +} + +/* Capsule */ +static CYTHON_INLINE PyObject * +__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig) +{ + PyObject *cobj; +#if PY_VERSION_HEX >= 0x02070000 + cobj = PyCapsule_New(p, sig, NULL); +#else + cobj = PyCObject_FromVoidPtr(p, NULL); +#endif + return cobj; +} + +/* IsLittleEndian */ +static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) +{ + union { + uint32_t u32; + uint8_t u8[4]; + } S; + S.u32 = 0x01020304; + return S.u8[0] == 4; +} + +/* BufferFormatCheck */ +static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, + __Pyx_BufFmt_StackElem* stack, + __Pyx_TypeInfo* type) { + stack[0].field = &ctx->root; + stack[0].parent_offset = 0; + ctx->root.type = type; + ctx->root.name = "buffer dtype"; + ctx->root.offset = 0; + ctx->head = stack; + ctx->head->field = &ctx->root; + ctx->fmt_offset = 0; + ctx->head->parent_offset = 0; + ctx->new_packmode = '@'; + ctx->enc_packmode = '@'; + ctx->new_count = 1; + ctx->enc_count = 0; + ctx->enc_type = 0; + ctx->is_complex = 0; + ctx->is_valid_array = 0; + 
ctx->struct_alignment = 0; + while (type->typegroup == 'S') { + ++ctx->head; + ctx->head->field = type->fields; + ctx->head->parent_offset = 0; + type = type->fields->type; + } +} +static int __Pyx_BufFmt_ParseNumber(const char** ts) { + int count; + const char* t = *ts; + if (*t < '0' || *t > '9') { + return -1; + } else { + count = *t++ - '0'; + while (*t >= '0' && *t <= '9') { + count *= 10; + count += *t++ - '0'; + } + } + *ts = t; + return count; +} +static int __Pyx_BufFmt_ExpectNumber(const char **ts) { + int number = __Pyx_BufFmt_ParseNumber(ts); + if (number == -1) + PyErr_Format(PyExc_ValueError,\ + "Does not understand character buffer dtype format string ('%c')", **ts); + return number; +} +static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { + PyErr_Format(PyExc_ValueError, + "Unexpected format string character: '%c'", ch); +} +static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { + switch (ch) { + case '?': return "'bool'"; + case 'c': return "'char'"; + case 'b': return "'signed char'"; + case 'B': return "'unsigned char'"; + case 'h': return "'short'"; + case 'H': return "'unsigned short'"; + case 'i': return "'int'"; + case 'I': return "'unsigned int'"; + case 'l': return "'long'"; + case 'L': return "'unsigned long'"; + case 'q': return "'long long'"; + case 'Q': return "'unsigned long long'"; + case 'f': return (is_complex ? "'complex float'" : "'float'"); + case 'd': return (is_complex ? "'complex double'" : "'double'"); + case 'g': return (is_complex ? "'complex long double'" : "'long double'"); + case 'T': return "a struct"; + case 'O': return "Python object"; + case 'P': return "a pointer"; + case 's': case 'p': return "a string"; + case 0: return "end"; + default: return "unparseable format string"; + } +} +static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return 2; + case 'i': case 'I': case 'l': case 'L': return 4; + case 'q': case 'Q': return 8; + case 'f': return (is_complex ? 8 : 4); + case 'd': return (is_complex ? 16 : 8); + case 'g': { + PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); + return 0; + } + case 'O': case 'P': return sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(short); + case 'i': case 'I': return sizeof(int); + case 'l': case 'L': return sizeof(long); + #ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(PY_LONG_LONG); + #endif + case 'f': return sizeof(float) * (is_complex ? 2 : 1); + case 'd': return sizeof(double) * (is_complex ? 2 : 1); + case 'g': return sizeof(long double) * (is_complex ? 
2 : 1);
+ case 'O': case 'P': return sizeof(void*);
+ default: {
+ __Pyx_BufFmt_RaiseUnexpectedChar(ch);
+ return 0;
+ }
+ }
+}
+typedef struct { char c; short x; } __Pyx_st_short;
+typedef struct { char c; int x; } __Pyx_st_int;
+typedef struct { char c; long x; } __Pyx_st_long;
+typedef struct { char c; float x; } __Pyx_st_float;
+typedef struct { char c; double x; } __Pyx_st_double;
+typedef struct { char c; long double x; } __Pyx_st_longdouble;
+typedef struct { char c; void *x; } __Pyx_st_void_p;
+#ifdef HAVE_LONG_LONG
+typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
+#endif
+static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
+ switch (ch) {
+ case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
+ case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
+ case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
+ case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
+#ifdef HAVE_LONG_LONG
+ case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
+#endif
+ case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
+ case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
+ case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
+ case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
+ default:
+ __Pyx_BufFmt_RaiseUnexpectedChar(ch);
+ return 0;
+ }
+}
+/* These are for computing the padding at the end of the struct to align
+ on the first member of the struct. This will probably be the same as above,
+ but we don't have any guarantees.
+ */
+typedef struct { short x; char c; } __Pyx_pad_short;
+typedef struct { int x; char c; } __Pyx_pad_int;
+typedef struct { long x; char c; } __Pyx_pad_long;
+typedef struct { float x; char c; } __Pyx_pad_float;
+typedef struct { double x; char c; } __Pyx_pad_double;
+typedef struct { long double x; char c; } __Pyx_pad_longdouble;
+typedef struct { void *x; char c; } __Pyx_pad_void_p;
+#ifdef HAVE_LONG_LONG
+typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
+#endif
+static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
+ switch (ch) {
+ case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
+ case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
+ case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
+ case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
+#ifdef HAVE_LONG_LONG
+ case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
+#endif
+ case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
+ case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
+ case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
+ case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
+ default:
+ __Pyx_BufFmt_RaiseUnexpectedChar(ch);
+ return 0;
+ }
+}
+static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
+ switch (ch) {
+ case 'c':
+ return 'H';
+ case 'b': case 'h': case 'i':
+ case 'l': case 'q': case 's': case 'p':
+ return 'I';
+ case '?': case 'B': case 'H': case 'I': case 'L': case 'Q':
+ return 'U';
+ case 'f': case 'd': case 'g':
+ return (is_complex ? 
'C' : 'R'); + case 'O': + return 'O'; + case 'P': + return 'P'; + default: { + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } + } +} +static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { + if (ctx->head == NULL || ctx->head->field == &ctx->root) { + const char* expected; + const char* quote; + if (ctx->head == NULL) { + expected = "end"; + quote = ""; + } else { + expected = ctx->head->field->type->name; + quote = "'"; + } + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch, expected %s%s%s but got %s", + quote, expected, quote, + __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); + } else { + __Pyx_StructField* field = ctx->head->field; + __Pyx_StructField* parent = (ctx->head - 1)->field; + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", + field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), + parent->type->name, field->name); + } +} +static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { + char group; + size_t size, offset, arraysize = 1; + if (ctx->enc_type == 0) return 0; + if (ctx->head->field->type->arraysize[0]) { + int i, ndim = 0; + if (ctx->enc_type == 's' || ctx->enc_type == 'p') { + ctx->is_valid_array = ctx->head->field->type->ndim == 1; + ndim = 1; + if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { + PyErr_Format(PyExc_ValueError, + "Expected a dimension of size %zu, got %zu", + ctx->head->field->type->arraysize[0], ctx->enc_count); + return -1; + } + } + if (!ctx->is_valid_array) { + PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", + ctx->head->field->type->ndim, ndim); + return -1; + } + for (i = 0; i < ctx->head->field->type->ndim; i++) { + arraysize *= ctx->head->field->type->arraysize[i]; + } + ctx->is_valid_array = 0; + ctx->enc_count = 1; + } + group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); + do { + __Pyx_StructField* field = ctx->head->field; + __Pyx_TypeInfo* type = field->type; + if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { + size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); + } else { + size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); + } + if (ctx->enc_packmode == '@') { + size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); + size_t align_mod_offset; + if (align_at == 0) return -1; + align_mod_offset = ctx->fmt_offset % align_at; + if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; + if (ctx->struct_alignment == 0) + ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, + ctx->is_complex); + } + if (type->size != size || type->typegroup != group) { + if (type->typegroup == 'C' && type->fields != NULL) { + size_t parent_offset = ctx->head->parent_offset + field->offset; + ++ctx->head; + ctx->head->field = type->fields; + ctx->head->parent_offset = parent_offset; + continue; + } + if ((type->typegroup == 'H' || group == 'H') && type->size == size) { + } else { + __Pyx_BufFmt_RaiseExpected(ctx); + return -1; + } + } + offset = ctx->head->parent_offset + field->offset; + if (ctx->fmt_offset != offset) { + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", + (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); + return -1; + } + ctx->fmt_offset += size; + if (arraysize) + ctx->fmt_offset += (arraysize - 1) * size; + --ctx->enc_count; + while (1) { + if 
(field == &ctx->root) { + ctx->head = NULL; + if (ctx->enc_count != 0) { + __Pyx_BufFmt_RaiseExpected(ctx); + return -1; + } + break; + } + ctx->head->field = ++field; + if (field->type == NULL) { + --ctx->head; + field = ctx->head->field; + continue; + } else if (field->type->typegroup == 'S') { + size_t parent_offset = ctx->head->parent_offset + field->offset; + if (field->type->fields->type == NULL) continue; + field = field->type->fields; + ++ctx->head; + ctx->head->field = field; + ctx->head->parent_offset = parent_offset; + break; + } else { + break; + } + } + } while (ctx->enc_count); + ctx->enc_type = 0; + ctx->is_complex = 0; + return 0; +} +static PyObject * +__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) +{ + const char *ts = *tsp; + int i = 0, number, ndim; + ++ts; + if (ctx->new_count != 1) { + PyErr_SetString(PyExc_ValueError, + "Cannot handle repeated arrays in format string"); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ndim = ctx->head->field->type->ndim; + while (*ts && *ts != ')') { + switch (*ts) { + case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; + default: break; + } + number = __Pyx_BufFmt_ExpectNumber(&ts); + if (number == -1) return NULL; + if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) + return PyErr_Format(PyExc_ValueError, + "Expected a dimension of size %zu, got %d", + ctx->head->field->type->arraysize[i], number); + if (*ts != ',' && *ts != ')') + return PyErr_Format(PyExc_ValueError, + "Expected a comma in format string, got '%c'", *ts); + if (*ts == ',') ts++; + i++; + } + if (i != ndim) + return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", + ctx->head->field->type->ndim, i); + if (!*ts) { + PyErr_SetString(PyExc_ValueError, + "Unexpected end of format string, expected ')'"); + return NULL; + } + ctx->is_valid_array = 1; + ctx->new_count = 1; + *tsp = ++ts; + return Py_None; +} +static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { + int got_Z = 0; + while (1) { + switch(*ts) { + case 0: + if (ctx->enc_type != 0 && ctx->head == NULL) { + __Pyx_BufFmt_RaiseExpected(ctx); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + if (ctx->head != NULL) { + __Pyx_BufFmt_RaiseExpected(ctx); + return NULL; + } + return ts; + case ' ': + case '\r': + case '\n': + ++ts; + break; + case '<': + if (!__Pyx_Is_Little_Endian()) { + PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); + return NULL; + } + ctx->new_packmode = '='; + ++ts; + break; + case '>': + case '!': + if (__Pyx_Is_Little_Endian()) { + PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); + return NULL; + } + ctx->new_packmode = '='; + ++ts; + break; + case '=': + case '@': + case '^': + ctx->new_packmode = *ts++; + break; + case 'T': + { + const char* ts_after_sub; + size_t i, struct_count = ctx->new_count; + size_t struct_alignment = ctx->struct_alignment; + ctx->new_count = 1; + ++ts; + if (*ts != '{') { + PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_type = 0; + ctx->enc_count = 0; + ctx->struct_alignment = 0; + ++ts; + ts_after_sub = ts; + for (i = 0; i != struct_count; ++i) { + ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); + if (!ts_after_sub) return NULL; + } + ts = ts_after_sub; + if 
(struct_alignment) ctx->struct_alignment = struct_alignment; + } + break; + case '}': + { + size_t alignment = ctx->struct_alignment; + ++ts; + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_type = 0; + if (alignment && ctx->fmt_offset % alignment) { + ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); + } + } + return ts; + case 'x': + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->fmt_offset += ctx->new_count; + ctx->new_count = 1; + ctx->enc_count = 0; + ctx->enc_type = 0; + ctx->enc_packmode = ctx->new_packmode; + ++ts; + break; + case 'Z': + got_Z = 1; + ++ts; + if (*ts != 'f' && *ts != 'd' && *ts != 'g') { + __Pyx_BufFmt_RaiseUnexpectedChar('Z'); + return NULL; + } + CYTHON_FALLTHROUGH; + case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': + case 'l': case 'L': case 'q': case 'Q': + case 'f': case 'd': case 'g': + case 'O': case 'p': + if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) && + (ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) { + ctx->enc_count += ctx->new_count; + ctx->new_count = 1; + got_Z = 0; + ++ts; + break; + } + CYTHON_FALLTHROUGH; + case 's': + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_count = ctx->new_count; + ctx->enc_packmode = ctx->new_packmode; + ctx->enc_type = *ts; + ctx->is_complex = got_Z; + ++ts; + ctx->new_count = 1; + got_Z = 0; + break; + case ':': + ++ts; + while(*ts != ':') ++ts; + ++ts; + break; + case '(': + if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; + break; + default: + { + int number = __Pyx_BufFmt_ExpectNumber(&ts); + if (number == -1) return NULL; + ctx->new_count = (size_t)number; + } + } + } +} + +/* TypeInfoCompare */ + static int +__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) +{ + int i; + if (!a || !b) + return 0; + if (a == b) + return 1; + if (a->size != b->size || a->typegroup != b->typegroup || + a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { + if (a->typegroup == 'H' || b->typegroup == 'H') { + return a->size == b->size; + } else { + return 0; + } + } + if (a->ndim) { + for (i = 0; i < a->ndim; i++) + if (a->arraysize[i] != b->arraysize[i]) + return 0; + } + if (a->typegroup == 'S') { + if (a->flags != b->flags) + return 0; + if (a->fields || b->fields) { + if (!(a->fields && b->fields)) + return 0; + for (i = 0; a->fields[i].type && b->fields[i].type; i++) { + __Pyx_StructField *field_a = a->fields + i; + __Pyx_StructField *field_b = b->fields + i; + if (field_a->offset != field_b->offset || + !__pyx_typeinfo_cmp(field_a->type, field_b->type)) + return 0; + } + return !a->fields[i].type && !b->fields[i].type; + } + } + return 1; +} + +/* MemviewSliceValidateAndInit */ + static int +__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) +{ + if (buf->shape[dim] <= 1) + return 1; + if (buf->strides) { + if (spec & __Pyx_MEMVIEW_CONTIG) { + if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { + if (unlikely(buf->strides[dim] != sizeof(void *))) { + PyErr_Format(PyExc_ValueError, + "Buffer is not indirectly contiguous " + "in dimension %d.", dim); + goto fail; + } + } else if (unlikely(buf->strides[dim] != buf->itemsize)) { + PyErr_SetString(PyExc_ValueError, + "Buffer and memoryview are not contiguous " + "in the same dimension."); + goto fail; + } + } + if (spec & __Pyx_MEMVIEW_FOLLOW) { + Py_ssize_t stride = buf->strides[dim]; + if (stride < 0) + stride = -stride; + if (unlikely(stride < buf->itemsize)) { + PyErr_SetString(PyExc_ValueError, + "Buffer and 
memoryview are not contiguous " + "in the same dimension."); + goto fail; + } + } + } else { + if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) { + PyErr_Format(PyExc_ValueError, + "C-contiguous buffer is not contiguous in " + "dimension %d", dim); + goto fail; + } else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) { + PyErr_Format(PyExc_ValueError, + "C-contiguous buffer is not indirect in " + "dimension %d", dim); + goto fail; + } else if (unlikely(buf->suboffsets)) { + PyErr_SetString(PyExc_ValueError, + "Buffer exposes suboffsets but no strides"); + goto fail; + } + } + return 1; +fail: + return 0; +} +static int +__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) +{ + if (spec & __Pyx_MEMVIEW_DIRECT) { + if (unlikely(buf->suboffsets && buf->suboffsets[dim] >= 0)) { + PyErr_Format(PyExc_ValueError, + "Buffer not compatible with direct access " + "in dimension %d.", dim); + goto fail; + } + } + if (spec & __Pyx_MEMVIEW_PTR) { + if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) { + PyErr_Format(PyExc_ValueError, + "Buffer is not indirectly accessible " + "in dimension %d.", dim); + goto fail; + } + } + return 1; +fail: + return 0; +} +static int +__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) +{ + int i; + if (c_or_f_flag & __Pyx_IS_F_CONTIG) { + Py_ssize_t stride = 1; + for (i = 0; i < ndim; i++) { + if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { + PyErr_SetString(PyExc_ValueError, + "Buffer not fortran contiguous."); + goto fail; + } + stride = stride * buf->shape[i]; + } + } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { + Py_ssize_t stride = 1; + for (i = ndim - 1; i >- 1; i--) { + if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { + PyErr_SetString(PyExc_ValueError, + "Buffer not C contiguous."); + goto fail; + } + stride = stride * buf->shape[i]; + } + } + return 1; +fail: + return 0; +} +static int __Pyx_ValidateAndInit_memviewslice( + int *axes_specs, + int c_or_f_flag, + int buf_flags, + int ndim, + __Pyx_TypeInfo *dtype, + __Pyx_BufFmt_StackElem stack[], + __Pyx_memviewslice *memviewslice, + PyObject *original_obj) +{ + struct __pyx_memoryview_obj *memview, *new_memview; + __Pyx_RefNannyDeclarations + Py_buffer *buf; + int i, spec = 0, retval = -1; + __Pyx_BufFmt_Context ctx; + int from_memoryview = __pyx_memoryview_check(original_obj); + __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); + if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) + original_obj)->typeinfo)) { + memview = (struct __pyx_memoryview_obj *) original_obj; + new_memview = NULL; + } else { + memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( + original_obj, buf_flags, 0, dtype); + new_memview = memview; + if (unlikely(!memview)) + goto fail; + } + buf = &memview->view; + if (unlikely(buf->ndim != ndim)) { + PyErr_Format(PyExc_ValueError, + "Buffer has wrong number of dimensions (expected %d, got %d)", + ndim, buf->ndim); + goto fail; + } + if (new_memview) { + __Pyx_BufFmt_Init(&ctx, stack, dtype); + if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail; + } + if (unlikely((unsigned) buf->itemsize != dtype->size)) { + PyErr_Format(PyExc_ValueError, + "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " + "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", + buf->itemsize, + (buf->itemsize > 1) ? "s" : "", + dtype->name, + dtype->size, + (dtype->size > 1) ? 
"s" : ""); + goto fail; + } + if (buf->len > 0) { + for (i = 0; i < ndim; i++) { + spec = axes_specs[i]; + if (unlikely(!__pyx_check_strides(buf, i, ndim, spec))) + goto fail; + if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec))) + goto fail; + } + if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))) + goto fail; + } + if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, + new_memview != NULL) == -1)) { + goto fail; + } + retval = 0; + goto no_fail; +fail: + Py_XDECREF(new_memview); + retval = -1; +no_fail: + __Pyx_RefNannyFinishContext(); + return retval; +} + +/* ObjectToMemviewSlice */ + static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(PyObject *obj, int writable_flag) { + __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_BufFmt_StackElem stack[1]; + int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; + int retcode; + if (obj == Py_None) { + result.memview = (struct __pyx_memoryview_obj *) Py_None; + return result; + } + retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, + (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 3, + &__Pyx_TypeInfo_int, stack, + &result, obj); + if (unlikely(retcode == -1)) + goto __pyx_fail; + return result; +__pyx_fail: + result.memview = NULL; + result.data = NULL; + return result; +} + +/* ObjectToMemviewSlice */ + static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *obj, int writable_flag) { + __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_BufFmt_StackElem stack[1]; + int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; + int retcode; + if (obj == Py_None) { + result.memview = (struct __pyx_memoryview_obj *) Py_None; + return result; + } + retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, + (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 3, + &__Pyx_TypeInfo_float, stack, + &result, obj); + if (unlikely(retcode == -1)) + goto __pyx_fail; + return result; +__pyx_fail: + result.memview = NULL; + result.data = NULL; + return result; +} + +/* ObjectToMemviewSlice */ + static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *obj, int writable_flag) { + __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_BufFmt_StackElem stack[1]; + int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; + int retcode; + if (obj == Py_None) { + result.memview = (struct __pyx_memoryview_obj *) Py_None; + return result; + } + retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, + (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, + &__Pyx_TypeInfo_int, stack, + &result, obj); + if (unlikely(retcode == -1)) + goto __pyx_fail; + return result; +__pyx_fail: + result.memview = NULL; + result.data = NULL; + return result; +} + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { + const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(int) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(int) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if 
(sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(int) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(int), + little, !is_unsigned); + } +} + +/* CIntFromPyVerify */ + #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) +#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) +#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ + {\ + func_type value = func_value;\ + if (sizeof(target_type) < sizeof(func_type)) {\ + if (unlikely(value != (func_type) (target_type) value)) {\ + func_type zero = 0;\ + if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ + return (target_type) -1;\ + if (is_unsigned && unlikely(value < zero))\ + goto raise_neg_overflow;\ + else\ + goto raise_overflow;\ + }\ + }\ + return (target_type) value;\ + } + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { + const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(long) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(long) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(long) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(long), + little, !is_unsigned); + } +} + +/* MemviewSliceCopyTemplate */ + static __Pyx_memviewslice +__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, + const char *mode, int ndim, + size_t sizeof_dtype, int contig_flag, + int dtype_is_object) +{ + __Pyx_RefNannyDeclarations + int i; + __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; + struct __pyx_memoryview_obj *from_memview = from_mvs->memview; + Py_buffer *buf = &from_memview->view; + PyObject *shape_tuple = NULL; + PyObject *temp_int = NULL; + struct __pyx_array_obj *array_obj = NULL; + struct __pyx_memoryview_obj *memview_obj = NULL; + __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); + for (i = 0; i < ndim; i++) { + if (unlikely(from_mvs->suboffsets[i] >= 0)) { + PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " + "indirect dimensions (axis %d)", i); + goto fail; + } + } + shape_tuple = PyTuple_New(ndim); + if (unlikely(!shape_tuple)) { + goto fail; + } + __Pyx_GOTREF(shape_tuple); + for(i = 0; i < ndim; i++) { + temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); + if(unlikely(!temp_int)) { + goto fail; + } else { + PyTuple_SET_ITEM(shape_tuple, i, temp_int); + temp_int = NULL; + } + } + 
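+    /* Descriptive note on the generated Cython helper below: build a new
+       contiguous array with the source slice's shape, wrap it in a
+       memoryview, and copy the slice contents into it. */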
array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); + if (unlikely(!array_obj)) { + goto fail; + } + __Pyx_GOTREF(array_obj); + memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( + (PyObject *) array_obj, contig_flag, + dtype_is_object, + from_mvs->memview->typeinfo); + if (unlikely(!memview_obj)) + goto fail; + if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) + goto fail; + if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, + dtype_is_object) < 0)) + goto fail; + goto no_fail; +fail: + __Pyx_XDECREF(new_mvs.memview); + new_mvs.memview = NULL; + new_mvs.data = NULL; +no_fail: + __Pyx_XDECREF(shape_tuple); + __Pyx_XDECREF(temp_int); + __Pyx_XDECREF(array_obj); + __Pyx_RefNannyFinishContext(); + return new_mvs; +} + +/* CIntFromPy */ + static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { + const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(int) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (int) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { + return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { + return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { + return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (int) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(int) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + 
} else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) + case -2: + if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + } +#endif + if (sizeof(int) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + 
int val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (int) -1; + } + } else { + int val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (int) -1; + val = __Pyx_PyInt_As_int(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to int"); + return (int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to int"); + return (int) -1; +} + +/* CIntFromPy */ + static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { + const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(long) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (long) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { + return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { + return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { + return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (long) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(long) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= 
sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) + case -2: + if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + } +#endif + if (sizeof(long) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { 
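+        /* Generic fallback: convert through _PyLong_AsByteArray when none of
+           the fixed-width conversions above applied (PyPy does not expose
+           this API, so it raises RuntimeError there instead). */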
+#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + long val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (long) -1; + } + } else { + long val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (long) -1; + val = __Pyx_PyInt_As_long(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to long"); + return (long) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to long"); + return (long) -1; +} + +/* CIntFromPy */ + static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { + const char neg_one = (char) ((char) 0 - (char) 1), const_zero = (char) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(char) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (char) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (char) 0; + case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) + case 2: + if (8 * sizeof(char) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { + return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(char) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { + return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(char) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { + return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (char) -1; + if (unlikely(result == 1)) + goto 
raise_neg_overflow; + } +#endif + if (sizeof(char) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (char) 0; + case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) + case -2: + if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { + return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(char) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { + return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { + return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(char) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { + return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { + return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(char) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { + return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + } +#endif + if (sizeof(char) <= sizeof(long)) { + 
__PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + char val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (char) -1; + } + } else { + char val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (char) -1; + val = __Pyx_PyInt_As_char(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to char"); + return (char) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to char"); + return (char) -1; +} + +/* CheckBinaryVersion */ + static int __Pyx_check_binary_version(void) { + char ctversion[4], rtversion[4]; + PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); + PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); + if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { + char message[200]; + PyOS_snprintf(message, sizeof(message), + "compiletime version %s of module '%.100s' " + "does not match runtime version %s", + ctversion, __Pyx_MODULE_NAME, rtversion); + return PyErr_WarnEx(NULL, message, 1); + } + return 0; +} + +/* InitStrings */ + static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { + while (t->p) { + #if PY_MAJOR_VERSION < 3 + if (t->is_unicode) { + *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); + } else if (t->intern) { + *t->p = PyString_InternFromString(t->s); + } else { + *t->p = PyString_FromStringAndSize(t->s, t->n - 1); + } + #else + if (t->is_unicode | t->is_str) { + if (t->intern) { + *t->p = PyUnicode_InternFromString(t->s); + } else if (t->encoding) { + *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); + } else { + *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); + } + } else { + *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); + } + #endif + if (!*t->p) + return -1; + if (PyObject_Hash(*t->p) == -1) + return -1; + ++t; + } + return 0; +} + +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { + return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); +} +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { + Py_ssize_t ignore; + return __Pyx_PyObject_AsStringAndSize(o, &ignore); +} +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +#if !CYTHON_PEP393_ENABLED +static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + char* defenc_c; + PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); + if (!defenc) return NULL; + defenc_c = PyBytes_AS_STRING(defenc); +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + { + char* end = defenc_c + PyBytes_GET_SIZE(defenc); + char* c; + for (c = defenc_c; c < end; c++) { + if ((unsigned char) (*c) >= 128) { + 
PyUnicode_AsASCIIString(o); + return NULL; + } + } + } +#endif + *length = PyBytes_GET_SIZE(defenc); + return defenc_c; +} +#else +static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + if (likely(PyUnicode_IS_ASCII(o))) { + *length = PyUnicode_GET_LENGTH(o); + return PyUnicode_AsUTF8(o); + } else { + PyUnicode_AsASCIIString(o); + return NULL; + } +#else + return PyUnicode_AsUTF8AndSize(o, length); +#endif +} +#endif +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT + if ( +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + __Pyx_sys_getdefaultencoding_not_ascii && +#endif + PyUnicode_Check(o)) { + return __Pyx_PyUnicode_AsStringAndSize(o, length); + } else +#endif +#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) + if (PyByteArray_Check(o)) { + *length = PyByteArray_GET_SIZE(o); + return PyByteArray_AS_STRING(o); + } else +#endif + { + char* result; + int r = PyBytes_AsStringAndSize(o, &result, length); + if (unlikely(r < 0)) { + return NULL; + } else { + return result; + } + } +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { + int is_true = x == Py_True; + if (is_true | (x == Py_False) | (x == Py_None)) return is_true; + else return PyObject_IsTrue(x); +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { + int retval; + if (unlikely(!x)) return -1; + retval = __Pyx_PyObject_IsTrue(x); + Py_DECREF(x); + return retval; +} +static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { +#if PY_MAJOR_VERSION >= 3 + if (PyLong_Check(result)) { + if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, + "__int__ returned non-int (type %.200s). 
" + "The ability to return an instance of a strict subclass of int " + "is deprecated, and may be removed in a future version of Python.", + Py_TYPE(result)->tp_name)) { + Py_DECREF(result); + return NULL; + } + return result; + } +#endif + PyErr_Format(PyExc_TypeError, + "__%.4s__ returned non-%.4s (type %.200s)", + type_name, type_name, Py_TYPE(result)->tp_name); + Py_DECREF(result); + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { +#if CYTHON_USE_TYPE_SLOTS + PyNumberMethods *m; +#endif + const char *name = NULL; + PyObject *res = NULL; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x) || PyLong_Check(x))) +#else + if (likely(PyLong_Check(x))) +#endif + return __Pyx_NewRef(x); +#if CYTHON_USE_TYPE_SLOTS + m = Py_TYPE(x)->tp_as_number; + #if PY_MAJOR_VERSION < 3 + if (m && m->nb_int) { + name = "int"; + res = m->nb_int(x); + } + else if (m && m->nb_long) { + name = "long"; + res = m->nb_long(x); + } + #else + if (likely(m && m->nb_int)) { + name = "int"; + res = m->nb_int(x); + } + #endif +#else + if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { + res = PyNumber_Int(x); + } +#endif + if (likely(res)) { +#if PY_MAJOR_VERSION < 3 + if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { +#else + if (unlikely(!PyLong_CheckExact(res))) { +#endif + return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); + } + } + else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_TypeError, + "an integer is required"); + } + return res; +} +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { + Py_ssize_t ival; + PyObject *x; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(b))) { + if (sizeof(Py_ssize_t) >= sizeof(long)) + return PyInt_AS_LONG(b); + else + return PyInt_AsSsize_t(b); + } +#endif + if (likely(PyLong_CheckExact(b))) { + #if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)b)->ob_digit; + const Py_ssize_t size = Py_SIZE(b); + if (likely(__Pyx_sst_abs(size) <= 1)) { + ival = likely(size) ? digits[0] : 0; + if (size == -1) ival = -ival; + return ival; + } else { + switch (size) { + case 2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + } + } + #endif + return PyLong_AsSsize_t(b); + } + x = PyNumber_Index(b); + if (!x) return -1; + ival = PyInt_AsSsize_t(x); + Py_DECREF(x); + return ival; +} +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { + return b ? 
__Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); +} +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { + return PyInt_FromSize_t(ival); +} + + +#endif /* Py_PYTHON_H */ diff --git a/nemo/collections/tts/modules/monotonic_align/core.pyx b/nemo/collections/tts/modules/monotonic_align/core.pyx new file mode 100644 index 000000000000..bfaabd4d21c2 --- /dev/null +++ b/nemo/collections/tts/modules/monotonic_align/core.pyx @@ -0,0 +1,42 @@ +cimport cython +from cython.parallel import prange + + +@cython.boundscheck(False) +@cython.wraparound(False) +cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: + cdef int x + cdef int y + cdef float v_prev + cdef float v_cur + cdef float tmp + cdef int index = t_x - 1 + + for y in range(t_y): + for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): + if x == y: + v_cur = max_neg_val + else: + v_cur = value[y-1, x] + if x == 0: + if y == 0: + v_prev = 0. + else: + v_prev = max_neg_val + else: + v_prev = value[y-1, x-1] + value[y, x] += max(v_prev, v_cur) + + for y in range(t_y - 1, -1, -1): + path[y, index] = 1 + if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): + index = index - 1 + + +@cython.boundscheck(False) +@cython.wraparound(False) +cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: + cdef int b = paths.shape[0] + cdef int i + for i in prange(b, nogil=True): + maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) diff --git a/nemo/collections/tts/modules/monotonic_align/setup.py b/nemo/collections/tts/modules/monotonic_align/setup.py new file mode 100644 index 000000000000..30c224807a70 --- /dev/null +++ b/nemo/collections/tts/modules/monotonic_align/setup.py @@ -0,0 +1,9 @@ +from distutils.core import setup +from Cython.Build import cythonize +import numpy + +setup( + name = 'monotonic_align', + ext_modules = cythonize("core.pyx"), + include_dirs=[numpy.get_include()] +) diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index dba9bdda87aa..ce08e72e3b26 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -18,22 +18,14 @@ from pytorch_lightning.loggers import LoggerCollection, TensorBoardLogger import math -import commons -import modules -import attentions -import monotonic_align - from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding -import commons -from commons import init_weights, get_padding -from transforms import piecewise_rational_quadratic_transform from nemo.collections.asr.data.audio_to_text import FastPitchDataset from nemo.collections.common.parts.preprocessing import parsers from nemo.collections.tts.helpers.helpers import plot_spectrogram_to_numpy, regulate_len from nemo.collections.tts.models.base import TextToWaveform +from nemo.collections.tts.modules.monotonic_align import maximum_path from nemo.core.classes.common import PretrainedModelInfo, typecheck from nemo.core.neural_types.elements import ( MelSpectrogramType, @@ -196,7 +188,7 @@ def forward(self, x, x_mask, g=None, **kwargs): else: g_l = torch.zeros_like(x_in) - acts = commons.fused_add_tanh_sigmoid_multiply( + acts = fused_add_tanh_sigmoid_multiply( x_in, g_l, n_channels_tensor) @@ -568,7 +560,7 @@ def __init__(self, self.emb = nn.Embedding(n_vocab, hidden_channels) 
nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) - self.encoder = attentions.Encoder( + self.encoder = Encoder( hidden_channels, filter_channels, n_heads, @@ -580,7 +572,7 @@ def __init__(self, def forward(self, x, x_lengths): x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) + x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) x = self.encoder(x * x_mask, x_mask) stats = self.proj(x) * x_mask @@ -645,7 +637,7 @@ def __init__(self, self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) + x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) x = self.pre(x) * x_mask x = self.enc(x, x_mask, g=g) stats = self.proj(x) * x_mask @@ -889,7 +881,7 @@ def forward(self, x, x_lengths, y, y_lengths, sid=None): neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() + attn = maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() w = attn.sum(2) if self.use_sdp: @@ -904,7 +896,7 @@ def forward(self, x, x_lengths, y, y_lengths, sid=None): m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) - z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) + z_slice, ids_slice = rand_slice_segments(z, y_lengths, self.segment_size) o = self.dec(z_slice, g=g) return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) @@ -922,9 +914,9 @@ def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_sca w = torch.exp(logw) * x_mask * length_scale w_ceil = torch.ceil(w) y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) + y_mask = torch.unsqueeze(sequence_mask(y_lengths, None), 1).to(x_mask.dtype) attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) + attn = generate_path(w_ceil, attn_mask) m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] @@ -944,6 +936,10 @@ def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): o_hat = self.dec(z_hat * y_mask, g=g_tgt) return o_hat, y_mask, (z, z_p, z_hat) +################## +# Mel_processing # +################## + def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): global mel_basis @@ -985,3 +981,652 @@ def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, spec = spectral_normalize_torch(spec) return spec + + +########### +# Commons # +########### + +def init_weights(m, mean=0.0, std=0.01): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + m.weight.data.normal_(mean, std) + + +def get_padding(kernel_size, dilation=1): + return int((kernel_size*dilation - dilation)/2) + + +def convert_pad_shape(pad_shape): + l = pad_shape[::-1] + pad_shape = [item for sublist in l for item in sublist] + return pad_shape 
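+# Worked example (illustrative, not part of the original sources):
+#   convert_pad_shape([[0, 0], [0, 0], [1, 0]]) -> [1, 0, 0, 0, 0, 0]
+# i.e. the per-dimension [left, right] pairs are reversed and flattened into
+# the last-dimension-first layout that torch.nn.functional.pad expects.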
+
+
+def intersperse(lst, item):
+    result = [item] * (len(lst) * 2 + 1)
+    result[1::2] = lst
+    return result
+
+
+def kl_divergence(m_p, logs_p, m_q, logs_q):
+    """KL(P||Q)"""
+    kl = (logs_q - logs_p) - 0.5
+    kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. * logs_q)
+    return kl
+
+
+def rand_gumbel(shape):
+    """Sample from the Gumbel distribution, protect from overflows."""
+    uniform_samples = torch.rand(shape) * 0.99998 + 0.00001
+    return -torch.log(-torch.log(uniform_samples))
+
+
+def rand_gumbel_like(x):
+    g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device)
+    return g
+
+
+def slice_segments(x, ids_str, segment_size=4):
+    ret = torch.zeros_like(x[:, :, :segment_size])
+    for i in range(x.size(0)):
+        idx_str = ids_str[i]
+        idx_end = idx_str + segment_size
+        ret[i] = x[i, :, idx_str:idx_end]
+    return ret
+
+
+def rand_slice_segments(x, x_lengths=None, segment_size=4):
+    b, d, t = x.size()
+    if x_lengths is None:
+        x_lengths = t
+    ids_str_max = x_lengths - segment_size + 1
+    ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long)
+    ret = slice_segments(x, ids_str, segment_size)
+    return ret, ids_str
+
+
+def get_timing_signal_1d(
+    length, channels, min_timescale=1.0, max_timescale=1.0e4):
+    position = torch.arange(length, dtype=torch.float)
+    num_timescales = channels // 2
+    log_timescale_increment = (
+        math.log(float(max_timescale) / float(min_timescale)) /
+        (num_timescales - 1))
+    inv_timescales = min_timescale * torch.exp(
+        torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment)
+    scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1)
+    signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0)
+    signal = F.pad(signal, [0, 0, 0, channels % 2])
+    signal = signal.view(1, channels, length)
+    return signal
+
+
+def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):
+    b, channels, length = x.size()
+    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
+    return x + signal.to(dtype=x.dtype, device=x.device)
+
+
+def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1):
+    b, channels, length = x.size()
+    signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale)
+    return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis)
+
+
+def subsequent_mask(length):
+    mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
+    return mask
+
+
+@torch.jit.script
+def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
+    n_channels_int = n_channels[0]
+    in_act = input_a + input_b
+    t_act = torch.tanh(in_act[:, :n_channels_int, :])
+    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
+    acts = t_act * s_act
+    return acts
+
+
+def shift_1d(x):
+    x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
+    return x
+
+
+def sequence_mask(length, max_length=None):
+    if max_length is None:
+        max_length = length.max()
+    x = torch.arange(max_length, dtype=length.dtype, device=length.device)
+    return x.unsqueeze(0) < length.unsqueeze(1)
+
+
+def generate_path(duration, mask):
+    """
+    duration: [b, 1, t_x]
+    mask: [b, 1, t_y, t_x]
+    """
+    device = duration.device
+
+    b, _, t_y, t_x = mask.shape
+    cum_duration = torch.cumsum(duration, -1)
+
+    cum_duration_flat = cum_duration.view(b * t_x)
+    path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype)
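+    # At this point path is a [b * t_x, t_y] cumulative mask (frame y is on while
+    # y < total duration up to token x); the reshape and shifted subtraction below
+    # difference it along the token axis into a hard token-to-frame alignment.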
path = path.view(b, t_x, t_y) + path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] + path = path.unsqueeze(1).transpose(2,3) * mask + return path + + +def clip_grad_value_(parameters, clip_value, norm_type=2): + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + parameters = list(filter(lambda p: p.grad is not None, parameters)) + norm_type = float(norm_type) + if clip_value is not None: + clip_value = float(clip_value) + + total_norm = 0 + for p in parameters: + param_norm = p.grad.data.norm(norm_type) + total_norm += param_norm.item() ** norm_type + if clip_value is not None: + p.grad.data.clamp_(min=-clip_value, max=clip_value) + total_norm = total_norm ** (1. / norm_type) + return total_norm + + +############## +# Attentions # +############## +class Encoder(nn.Module): + def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): + super().__init__() + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.window_size = window_size + + self.drop = nn.Dropout(p_dropout) + self.attn_layers = nn.ModuleList() + self.norm_layers_1 = nn.ModuleList() + self.ffn_layers = nn.ModuleList() + self.norm_layers_2 = nn.ModuleList() + for i in range(self.n_layers): + self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) + self.norm_layers_1.append(LayerNorm(hidden_channels)) + self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) + self.norm_layers_2.append(LayerNorm(hidden_channels)) + + def forward(self, x, x_mask): + attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) + x = x * x_mask + for i in range(self.n_layers): + y = self.attn_layers[i](x, x, attn_mask) + y = self.drop(y) + x = self.norm_layers_1[i](x + y) + + y = self.ffn_layers[i](x, x_mask) + y = self.drop(y) + x = self.norm_layers_2[i](x + y) + x = x * x_mask + return x + + +class Decoder(nn.Module): + def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): + super().__init__() + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.proximal_bias = proximal_bias + self.proximal_init = proximal_init + + self.drop = nn.Dropout(p_dropout) + self.self_attn_layers = nn.ModuleList() + self.norm_layers_0 = nn.ModuleList() + self.encdec_attn_layers = nn.ModuleList() + self.norm_layers_1 = nn.ModuleList() + self.ffn_layers = nn.ModuleList() + self.norm_layers_2 = nn.ModuleList() + for i in range(self.n_layers): + self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) + self.norm_layers_0.append(LayerNorm(hidden_channels)) + self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) + self.norm_layers_1.append(LayerNorm(hidden_channels)) + self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) + self.norm_layers_2.append(LayerNorm(hidden_channels)) + + def forward(self, x, 
x_mask, h, h_mask):
+        """
+        x: decoder input
+        h: encoder output
+        """
+        self_attn_mask = subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype)
+        encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
+        x = x * x_mask
+        for i in range(self.n_layers):
+            y = self.self_attn_layers[i](x, x, self_attn_mask)
+            y = self.drop(y)
+            x = self.norm_layers_0[i](x + y)
+
+            y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
+            y = self.drop(y)
+            x = self.norm_layers_1[i](x + y)
+
+            y = self.ffn_layers[i](x, x_mask)
+            y = self.drop(y)
+            x = self.norm_layers_2[i](x + y)
+        x = x * x_mask
+        return x
+
+
+class MultiHeadAttention(nn.Module):
+    def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False):
+        super().__init__()
+        assert channels % n_heads == 0
+
+        self.channels = channels
+        self.out_channels = out_channels
+        self.n_heads = n_heads
+        self.p_dropout = p_dropout
+        self.window_size = window_size
+        self.heads_share = heads_share
+        self.block_length = block_length
+        self.proximal_bias = proximal_bias
+        self.proximal_init = proximal_init
+        self.attn = None
+
+        self.k_channels = channels // n_heads
+        self.conv_q = nn.Conv1d(channels, channels, 1)
+        self.conv_k = nn.Conv1d(channels, channels, 1)
+        self.conv_v = nn.Conv1d(channels, channels, 1)
+        self.conv_o = nn.Conv1d(channels, out_channels, 1)
+        self.drop = nn.Dropout(p_dropout)
+
+        if window_size is not None:
+            n_heads_rel = 1 if heads_share else n_heads
+            rel_stddev = self.k_channels**-0.5
+            self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
+            self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
+
+        nn.init.xavier_uniform_(self.conv_q.weight)
+        nn.init.xavier_uniform_(self.conv_k.weight)
+        nn.init.xavier_uniform_(self.conv_v.weight)
+        if proximal_init:
+            with torch.no_grad():
+                self.conv_k.weight.copy_(self.conv_q.weight)
+                self.conv_k.bias.copy_(self.conv_q.bias)
+
+    def forward(self, x, c, attn_mask=None):
+        q = self.conv_q(x)
+        k = self.conv_k(c)
+        v = self.conv_v(c)
+
+        x, self.attn = self.attention(q, k, v, mask=attn_mask)
+
+        x = self.conv_o(x)
+        return x
+
+    def attention(self, query, key, value, mask=None):
+        # reshape [b, d, t] -> [b, n_h, t, d_k]
+        b, d, t_s, t_t = (*key.size(), query.size(2))
+        query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
+        key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+        value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
+
+        scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1))
+        if self.window_size is not None:
+            assert t_s == t_t, "Relative attention is only available for self-attention."
+            key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
+            rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings)
+            scores_local = self._relative_position_to_absolute_position(rel_logits)
+            scores = scores + scores_local
+        if self.proximal_bias:
+            assert t_s == t_t, "Proximal bias is only available for self-attention."
+            scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
+        if mask is not None:
+            scores = scores.masked_fill(mask == 0, -1e4)
+            if self.block_length is not None:
+                assert t_s == t_t, "Local attention is only available for self-attention."
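+                # block_length restricts attention to a diagonal band: the
+                # triu/tril pair below zeroes entries more than block_length
+                # steps from the diagonal, which are then pushed to -1e4
+                # before the softmax.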
+ block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) + scores = scores.masked_fill(block_mask == 0, -1e4) + p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] + p_attn = self.drop(p_attn) + output = torch.matmul(p_attn, value) + if self.window_size is not None: + relative_weights = self._absolute_position_to_relative_position(p_attn) + value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) + output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) + output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] + return output, p_attn + + def _matmul_with_relative_values(self, x, y): + """ + x: [b, h, l, m] + y: [h or 1, m, d] + ret: [b, h, l, d] + """ + ret = torch.matmul(x, y.unsqueeze(0)) + return ret + + def _matmul_with_relative_keys(self, x, y): + """ + x: [b, h, l, d] + y: [h or 1, m, d] + ret: [b, h, l, m] + """ + ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) + return ret + + def _get_relative_embeddings(self, relative_embeddings, length): + max_relative_position = 2 * self.window_size + 1 + # Pad first before slice to avoid using cond ops. + pad_length = max(length - (self.window_size + 1), 0) + slice_start_position = max((self.window_size + 1) - length, 0) + slice_end_position = slice_start_position + 2 * length - 1 + if pad_length > 0: + padded_relative_embeddings = F.pad( + relative_embeddings, + convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) + else: + padded_relative_embeddings = relative_embeddings + used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] + return used_relative_embeddings + + def _relative_position_to_absolute_position(self, x): + """ + x: [b, h, l, 2*l-1] + ret: [b, h, l, l] + """ + batch, heads, length, _ = x.size() + # Concat columns of pad to shift from relative to absolute indexing. + x = F.pad(x, convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) + + # Concat extra elements so to add up to shape (len+1, 2*len-1). + x_flat = x.view([batch, heads, length * 2 * length]) + x_flat = F.pad(x_flat, convert_pad_shape([[0,0],[0,0],[0,length-1]])) + + # Reshape and slice out the padded elements. + x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] + return x_final + + def _absolute_position_to_relative_position(self, x): + """ + x: [b, h, l, l] + ret: [b, h, l, 2*l-1] + """ + batch, heads, length, _ = x.size() + # padd along column + x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) + x_flat = x.view([batch, heads, length**2 + length*(length -1)]) + # add 0's in the beginning that will skew the elements after reshape + x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]])) + x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] + return x_final + + def _attention_bias_proximal(self, length): + """Bias for self-attention to encourage attention to close positions. + Args: + length: an integer scalar. 
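The pad-reshape-slice trick in _relative_position_to_absolute_position above can be checked in isolation; a standalone sketch with torch only, using a tiny length so the shapes are easy to follow:

import torch
import torch.nn.functional as F

b, h, l = 1, 1, 3
x = torch.arange(b * h * l * (2 * l - 1), dtype=torch.float32).view(b, h, l, 2 * l - 1)
x = F.pad(x, (0, 1))                               # append one zero column
x_flat = F.pad(x.view(b, h, l * 2 * l), (0, l - 1))  # skew via flat padding
out = x_flat.view(b, h, l + 1, 2 * l - 1)[:, :, :l, l - 1:]
print(out.shape)                                   # torch.Size([1, 1, 3, 3])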
+ Returns: + a Tensor with shape [1, 1, length, length] + """ + r = torch.arange(length, dtype=torch.float32) + diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) + return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) + + +class FFN(nn.Module): + def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.activation = activation + self.causal = causal + + if causal: + self.padding = self._causal_padding + else: + self.padding = self._same_padding + + self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) + self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) + self.drop = nn.Dropout(p_dropout) + + def forward(self, x, x_mask): + x = self.conv_1(self.padding(x * x_mask)) + if self.activation == "gelu": + x = x * torch.sigmoid(1.702 * x) + else: + x = torch.relu(x) + x = self.drop(x) + x = self.conv_2(self.padding(x * x_mask)) + return x * x_mask + + def _causal_padding(self, x): + if self.kernel_size == 1: + return x + pad_l = self.kernel_size - 1 + pad_r = 0 + padding = [[0, 0], [0, 0], [pad_l, pad_r]] + x = F.pad(x, convert_pad_shape(padding)) + return x + + def _same_padding(self, x): + if self.kernel_size == 1: + return x + pad_l = (self.kernel_size - 1) // 2 + pad_r = self.kernel_size // 2 + padding = [[0, 0], [0, 0], [pad_l, pad_r]] + x = F.pad(x, convert_pad_shape(padding)) + return x + + +############## +# Transforms # +############## + +DEFAULT_MIN_BIN_WIDTH = 1e-3 +DEFAULT_MIN_BIN_HEIGHT = 1e-3 +DEFAULT_MIN_DERIVATIVE = 1e-3 + + +def piecewise_rational_quadratic_transform(inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + tails=None, + tail_bound=1., + min_bin_width=DEFAULT_MIN_BIN_WIDTH, + min_bin_height=DEFAULT_MIN_BIN_HEIGHT, + min_derivative=DEFAULT_MIN_DERIVATIVE): + + if tails is None: + spline_fn = rational_quadratic_spline + spline_kwargs = {} + else: + spline_fn = unconstrained_rational_quadratic_spline + spline_kwargs = { + 'tails': tails, + 'tail_bound': tail_bound + } + + outputs, logabsdet = spline_fn( + inputs=inputs, + unnormalized_widths=unnormalized_widths, + unnormalized_heights=unnormalized_heights, + unnormalized_derivatives=unnormalized_derivatives, + inverse=inverse, + min_bin_width=min_bin_width, + min_bin_height=min_bin_height, + min_derivative=min_derivative, + **spline_kwargs + ) + return outputs, logabsdet + + +def searchsorted(bin_locations, inputs, eps=1e-6): + bin_locations[..., -1] += eps + return torch.sum( + inputs[..., None] >= bin_locations, + dim=-1 + ) - 1 + + +def unconstrained_rational_quadratic_spline(inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + tails='linear', + tail_bound=1., + min_bin_width=DEFAULT_MIN_BIN_WIDTH, + min_bin_height=DEFAULT_MIN_BIN_HEIGHT, + min_derivative=DEFAULT_MIN_DERIVATIVE): + inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) + outside_interval_mask = ~inside_interval_mask + + outputs = torch.zeros_like(inputs) + logabsdet = torch.zeros_like(inputs) + + if tails == 'linear': + unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) + constant = np.log(np.exp(1 - min_derivative) - 1) + unnormalized_derivatives[..., 0] = constant + unnormalized_derivatives[..., -1] = 
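The searchsorted helper above reduces bin lookup to a comparison-and-sum over cumulative knot locations; a small worked example (torch only, values illustrative):

import torch

bin_edges = torch.tensor([0.0, 0.25, 0.5, 1.0])    # cumulative bin boundaries
inputs = torch.tensor([0.1, 0.3, 0.9])
bin_idx = torch.sum(inputs[..., None] >= bin_edges, dim=-1) - 1
print(bin_idx)                                     # tensor([0, 1, 2])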
constant + + outputs[outside_interval_mask] = inputs[outside_interval_mask] + logabsdet[outside_interval_mask] = 0 + else: + raise RuntimeError('{} tails are not implemented.'.format(tails)) + + outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( + inputs=inputs[inside_interval_mask], + unnormalized_widths=unnormalized_widths[inside_interval_mask, :], + unnormalized_heights=unnormalized_heights[inside_interval_mask, :], + unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], + inverse=inverse, + left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, + min_bin_width=min_bin_width, + min_bin_height=min_bin_height, + min_derivative=min_derivative + ) + + return outputs, logabsdet + +def rational_quadratic_spline(inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + left=0., right=1., bottom=0., top=1., + min_bin_width=DEFAULT_MIN_BIN_WIDTH, + min_bin_height=DEFAULT_MIN_BIN_HEIGHT, + min_derivative=DEFAULT_MIN_DERIVATIVE): + if torch.min(inputs) < left or torch.max(inputs) > right: + raise ValueError('Input to a transform is not within its domain') + + num_bins = unnormalized_widths.shape[-1] + + if min_bin_width * num_bins > 1.0: + raise ValueError('Minimal bin width too large for the number of bins') + if min_bin_height * num_bins > 1.0: + raise ValueError('Minimal bin height too large for the number of bins') + + widths = F.softmax(unnormalized_widths, dim=-1) + widths = min_bin_width + (1 - min_bin_width * num_bins) * widths + cumwidths = torch.cumsum(widths, dim=-1) + cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) + cumwidths = (right - left) * cumwidths + left + cumwidths[..., 0] = left + cumwidths[..., -1] = right + widths = cumwidths[..., 1:] - cumwidths[..., :-1] + + derivatives = min_derivative + F.softplus(unnormalized_derivatives) + + heights = F.softmax(unnormalized_heights, dim=-1) + heights = min_bin_height + (1 - min_bin_height * num_bins) * heights + cumheights = torch.cumsum(heights, dim=-1) + cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) + cumheights = (top - bottom) * cumheights + bottom + cumheights[..., 0] = bottom + cumheights[..., -1] = top + heights = cumheights[..., 1:] - cumheights[..., :-1] + + if inverse: + bin_idx = searchsorted(cumheights, inputs)[..., None] + else: + bin_idx = searchsorted(cumwidths, inputs)[..., None] + + input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] + input_bin_widths = widths.gather(-1, bin_idx)[..., 0] + + input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] + delta = heights / widths + input_delta = delta.gather(-1, bin_idx)[..., 0] + + input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] + input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] + + input_heights = heights.gather(-1, bin_idx)[..., 0] + + if inverse: + a = (((inputs - input_cumheights) * (input_derivatives + + input_derivatives_plus_one + - 2 * input_delta) + + input_heights * (input_delta - input_derivatives))) + b = (input_heights * input_derivatives + - (inputs - input_cumheights) * (input_derivatives + + input_derivatives_plus_one + - 2 * input_delta)) + c = - input_delta * (inputs - input_cumheights) + + discriminant = b.pow(2) - 4 * a * c + assert (discriminant >= 0).all() + + root = (2 * c) / (-b - torch.sqrt(discriminant)) + outputs = root * input_bin_widths + input_cumwidths + + theta_one_minus_theta = root * (1 - root) + denominator = input_delta + 
((input_derivatives + input_derivatives_plus_one - 2 * input_delta) + * theta_one_minus_theta) + derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) + + 2 * input_delta * theta_one_minus_theta + + input_derivatives * (1 - root).pow(2)) + logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) + + return outputs, -logabsdet + else: + theta = (inputs - input_cumwidths) / input_bin_widths + theta_one_minus_theta = theta * (1 - theta) + + numerator = input_heights * (input_delta * theta.pow(2) + + input_derivatives * theta_one_minus_theta) + denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) + * theta_one_minus_theta) + outputs = input_cumheights + numerator / denominator + + derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) + + 2 * input_delta * theta_one_minus_theta + + input_derivatives * (1 - theta).pow(2)) + logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) + + return outputs, logabsdet From aa29183cc040b0082b2aec7d795b4617d77f1d4c Mon Sep 17 00:00:00 2001 From: jasonjjl1999 Date: Thu, 11 Nov 2021 15:33:16 -0500 Subject: [PATCH 017/244] Remove old module calls --- nemo/collections/tts/modules/vits_modules.py | 40 ++++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index ce08e72e3b26..a85b72bf577c 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -25,7 +25,7 @@ from nemo.collections.common.parts.preprocessing import parsers from nemo.collections.tts.helpers.helpers import plot_spectrogram_to_numpy, regulate_len from nemo.collections.tts.models.base import TextToWaveform -from nemo.collections.tts.modules.monotonic_align import maximum_path +from nemo.collections.tts.monotonic_align import maximum_path from nemo.core.classes.common import PretrainedModelInfo, typecheck from nemo.core.neural_types.elements import ( MelSpectrogramType, @@ -39,7 +39,7 @@ from nemo.utils import logging from nemo.collections.tts.models.base import TextToWaveform from nemo.collections.tts.losses.vits_losses import DiscriminatorLoss, FeatureLoss, GeneratorLoss, KlLoss -#from nemo.collections.tts.modules.vits_modules import MultiPeriodDiscriminator, MultiScaleDiscriminator +#from nemo.collections.tts.vits_modules import MultiPeriodDiscriminator, MultiScaleDiscriminator LRELU_SLOPE = 0.1 @@ -430,25 +430,25 @@ def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows self.n_flows = n_flows self.gin_channels = gin_channels - self.log_flow = modules.Log() + self.log_flow = Log() self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) + self.flows.append(ElementwiseAffine(2)) for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) + self.flows.append(ConvFlow(2, filter_channels, kernel_size, n_layers=3)) + self.flows.append(Flip()) self.post_pre = nn.Conv1d(1, filter_channels, 1) self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) + self.post_convs = DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) + 
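A round-trip sketch for the rational-quadratic spline above, assuming the functions in this file are in scope. With tails='linear', passing num_bins - 1 unnormalized derivatives matches the internal F.pad(..., (1, 1)), and the inverse pass should recover the inputs with the negated log-determinant:

import torch

torch.manual_seed(0)
x = torch.randn(4, 10).clamp(-3, 3)
num_bins = 8
uw = torch.randn(4, 10, num_bins)
uh = torch.randn(4, 10, num_bins)
ud = torch.randn(4, 10, num_bins - 1)

y, logdet = piecewise_rational_quadratic_transform(
    x, uw, uh, ud, inverse=False, tails='linear', tail_bound=5.0)
x_rec, inv_logdet = piecewise_rational_quadratic_transform(
    y, uw, uh, ud, inverse=True, tails='linear', tail_bound=5.0)
print(torch.allclose(x, x_rec, atol=1e-3))         # True: inverse recovers x
print(torch.allclose(logdet, -inv_logdet, atol=1e-3))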
self.post_flows.append(ElementwiseAffine(2)) for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) + self.post_flows.append(ConvFlow(2, filter_channels, kernel_size, n_layers=3)) + self.post_flows.append(Flip()) self.pre = nn.Conv1d(in_channels, filter_channels, 1) self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) + self.convs = DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) if gin_channels != 0: self.cond = nn.Conv1d(gin_channels, filter_channels, 1) @@ -512,9 +512,9 @@ def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_cha self.drop = nn.Dropout(p_dropout) self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_1 = modules.LayerNorm(filter_channels) + self.norm_1 = LayerNorm(filter_channels) self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_2 = modules.LayerNorm(filter_channels) + self.norm_2 = LayerNorm(filter_channels) self.proj = nn.Conv1d(filter_channels, 1, 1) if gin_channels != 0: @@ -601,8 +601,8 @@ def __init__(self, self.flows = nn.ModuleList() for i in range(n_flows): - self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) + self.flows.append(ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) + self.flows.append(Flip()) def forward(self, x, x_mask, g=None, reverse=False): if not reverse: @@ -633,7 +633,7 @@ def __init__(self, self.gin_channels = gin_channels self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) + self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) def forward(self, x, x_lengths, g=None): @@ -652,7 +652,7 @@ def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_di self.num_kernels = len(resblock_kernel_sizes) self.num_upsamples = len(upsample_rates) self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 + resblock = ResBlock1 if resblock == '1' else ResBlock2 self.ups = nn.ModuleList() for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): @@ -678,7 +678,7 @@ def forward(self, x, g=None): x = x + self.cond(g) for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) + x = F.leaky_relu(x, LRELU_SLOPE) x = self.ups[i](x) xs = None for j in range(self.num_kernels): @@ -729,7 +729,7 @@ def forward(self, x): for l in self.convs: x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) + x = F.leaky_relu(x, LRELU_SLOPE) fmap.append(x) x = self.conv_post(x) fmap.append(x) @@ -757,7 +757,7 @@ def forward(self, x): for l in self.convs: x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) + x = F.leaky_relu(x, LRELU_SLOPE) fmap.append(x) x = self.conv_post(x) fmap.append(x) From bacca6fe59fef9b9ebb73aec557a118d17a3e715 Mon Sep 17 00:00:00 2001 From: jasonjjl1999 Date: Thu, 11 Nov 2021 15:36:33 -0500 Subject: [PATCH 018/244] Fix typo in monotonic align 
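The flows/post_flows lists above follow the usual normalizing-flow convention, which ResidualCouplingBlock's forward also uses: apply the flows in order for the forward pass, and in reversed order with reverse=True to invert. A toy sketch of that pattern (torch only; ToyFlow is illustrative):

import torch

class ToyFlow:
    def __call__(self, x, reverse=False):
        return x - 1.0 if reverse else x + 1.0

flows = [ToyFlow(), ToyFlow()]
x = torch.zeros(3)
for f in flows:
    x = f(x)                        # forward: x -> z
for f in reversed(flows):
    x = f(x, reverse=True)          # reverse: z -> x, exact inverse
print(x)                            # tensor([0., 0., 0.])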
import --- nemo/collections/tts/modules/vits_modules.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index a85b72bf577c..50f7b7fe0821 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -25,7 +25,7 @@ from nemo.collections.common.parts.preprocessing import parsers from nemo.collections.tts.helpers.helpers import plot_spectrogram_to_numpy, regulate_len from nemo.collections.tts.models.base import TextToWaveform -from nemo.collections.tts.monotonic_align import maximum_path +from nemo.collections.tts.modules.monotonic_align import maximum_path from nemo.core.classes.common import PretrainedModelInfo, typecheck from nemo.core.neural_types.elements import ( MelSpectrogramType, From 79bdc7c233bf8499d187cccebcaf5e26b3b7a012 Mon Sep 17 00:00:00 2001 From: "richa.ren@mail.utoronto.ca" Date: Tue, 16 Nov 2021 18:28:59 -0500 Subject: [PATCH 019/244] Modified validation step 1. reverted to tensorboard 2. validation_step logs audio, mel-spec for batch 0 3. validation_step_alt logs audio, mel-spec for batch 0 and loss_mel --- nemo/collections/tts/models/vits.py | 144 ++++++++++++++++++++-------- 1 file changed, 103 insertions(+), 41 deletions(-) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 645cec378464..57ed7de0968d 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -38,13 +38,6 @@ import nemo.collections.tts.modules.vits_modules as modules from nemo.collections.tts.modules.vits_modules import init_weights, get_padding, SynthesizerTrn, MultiPeriodDiscriminator - -HAVE_WANDB = True -try: - import wandb -except ModuleNotFoundError: - HAVE_WANDB = False - @dataclass class VitsConfig: parser: Dict[Any, Any] = MISSING @@ -165,7 +158,7 @@ def forward(self, batch, batch_idx): y_hat, attn, mask, *_ = self.net_g.module.infer(x, x_lengths, max_len=1000) y_hat_lengths = mask.sum([1, 2]).long() * self._cfg.model.hop_size - return y_hat[0, :, :y_hat_lengths[0]] + return y_hat, y_lengths def training_step(self, batch, batch_idx): (x, x_lengths, spec, spec_lengths, y, y_lengths) = batch @@ -239,7 +232,6 @@ def training_step(self, batch, batch_idx): "loss_disc_all": loss_disc_all, } self.log_dict(metrics, on_step=True, sync_dist=True) - self.log("scaled loss_mel", loss_mel, prog_bar=True, logger=False, sync_dist=True) def validation_step(self, batch, batch_idx): (x, x_lengths, spec, spec_lengths, y, y_lengths) = batch @@ -251,41 +243,111 @@ def validation_step(self, batch, batch_idx): mel, mel_lengths = self.audio_to_melspec_precessor(x, x_lengths) y_hat_mel, y_hat_mel_lengths = self.audio_to_melspec_precessor(y, y_lengths) - loss_mel = F.l1_loss(mel, y_hat_mel) + # plot audio once per epoch + if batch_idx == 0 and self.logger is not None and self.logger.experiment is not None: + self.logger.experiment.add_audio( + "val_wav_target", + y[0, : y_lengths[0]].data.cpu().numpy(), + self.global_step, + sample_rate=self.sample_rate, + ) + + self.logger.experiment.add_audio( + "val_wav_predicted", + y_hat[0, : y_hat_lengths[0]].data.cpu().numpy(), + self.global_step, + sample_rate=self.sample_rate, + ) - self.log_dict({"val_loss": loss_mel}, on_epoch=True, sync_dist=True) + self.logger.experiment.add_image( + "val_mel_target", + plot_spectrogram_to_numpy(mel[0, :, : mel_lengths[0]].cpu().numpy()), + self.global_step, + dataformats="HWC", + ) + + 
self.logger.experiment.add_image( + "val_mel_predicted", + plot_spectrogram_to_numpy(y_hat_mel[0, :, : y_hat_mel_lengths[0]].cpu().numpy()), + self.global_step, + dataformats="HWC", + ) + + + def validation_step_alt(self, batch, batch_idx): + + (x, x_lengths, spec, spec_lengths, y, y_lengths) = batch # plot audio once per epoch - if batch_idx == 0 and isinstance(self.logger, WandbLogger) and HAVE_WANDB: - clips = [] - specs = [] - - for i in range(min(5, y.shape[0])): - clips += [ - wandb.Audio( - y[i, : y_lengths[i]].data.cpu().numpy(), - caption=f"real audio {i}", - sample_rate=self.hps.data.sampling_rate, - ), - wandb.Audio( - y_hat[i, : y_hat_lengths[i]].data.cpu().numpy().astype('float32'), - caption=f"generated audio {i}", - sample_rate=self.hps.data.sampling_rate, - ), - ] - - specs += [ - wandb.Image( - plot_spectrogram_to_numpy(y_hat_mel[i, :, : y_hat_mel_lengths[i]].data.cpu().numpy()), - caption=f"output mel {i}", - ), - wandb.Image( - plot_spectrogram_to_numpy(mel[i, :, : mel_lengths[i]].cpu().numpy()), - caption=f"gt mel {i}", - ), - ] - - self.logger.experiment.log({"audio": clips, "specs": specs}) + if batch_idx == 0 and self.logger is not None and self.logger.experiment is not None: + + y_hat, attn, mask, *_ = self.net_g.module.infer(x, x_lengths, max_len=1000) + y_hat_lengths = mask.sum([1, 2]).long() * self.hps.data.hop_length + + # Note to modify the functions / use the ones in NeMo, we need the lengths + mel, mel_lengths = self.audio_to_melspec_precessor(x, x_lengths) + y_hat_mel, y_hat_mel_lengths = self.audio_to_melspec_precessor(y, y_lengths) + + self.logger.experiment.add_audio( + "val_wav_target", + y[0, : y_lengths[0]].data.cpu().numpy(), + self.global_step, + sample_rate=self.sample_rate, + ) + + self.logger.experiment.add_audio( + "val_wav_predicted", + y_hat[0, : y_hat_lengths[0]].data.cpu().numpy(), + self.global_step, + sample_rate=self.sample_rate, + ) + + self.logger.experiment.add_image( + "val_mel_target", + plot_spectrogram_to_numpy(mel[0, :, : mel_lengths[0]].cpu().numpy()), + self.global_step, + dataformats="HWC", + ) + + self.logger.experiment.add_image( + "val_mel_predicted", + plot_spectrogram_to_numpy(y_hat_mel[0, :, : y_hat_mel_lengths[0]].cpu().numpy()), + self.global_step, + dataformats="HWC", + ) + + y_hat, l_length, attn, ids_slice, x_mask, z_mask, \ + (z, z_p, m_p, logs_p, m_q, logs_q) = self.net_g(x, x_lengths, spec, spec_lengths) + mel = modules.spec_to_mel_torch( + spec, + self._cfg.model.train_ds.filter_length, + self._cfg.model.n_mel_channels, + self._cfg.model.sample_rate, + self._cfg.model.mel_fmin, + self._cfg.model.mel_fmax + ) + mel = modules.spec_to_mel_torch( + spec, + self._cfg.model.train_ds.filter_length, + self._cfg.model.n_mel_channels, + self._cfg.model.sample_rate, + self._cfg.model.mel_fmin, + self._cfg.model.mel_fmax + ) + y_mel = modules.slice_segments(mel, ids_slice, self._cfg.model.segment_size // self._cfg.model.hop_size) + y_hat_mel = modules.mel_spectrogram_torch( + y_hat.squeeze(1), + self._cfg.model.train_ds.filter_length, + self._cfg.model.n_mel_channels, + self._cfg.model.sample_rate, + self._cfg.model.hop_size, + self._cfg.model.preprocessing.n_window_size, + self._cfg.model.mel_fmin, + self._cfg.model.mel_fmax + ) + + loss_mel = F.l1_loss(y_mel, y_hat_mel) * self._cfg.model.c_mel + self.log_dict({"val_loss * c_mel": loss_mel}, on_epoch=True, sync_dist=True) @staticmethod def _loader(cfg): From 452af09c5976485e38c225aeb610d6b121371f6b Mon Sep 17 00:00:00 2001 From: jasonjjl1999 Date: Thu, 11 Nov 2021 15:26:11 
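The add_audio/add_image calls above are plain TensorBoard logging; a minimal standalone sketch with torch.utils.tensorboard (tags, shapes, and the log directory are illustrative):

import torch
import numpy as np
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("tb_logs")
audio = torch.rand(1, 22050) * 2 - 1               # (1, L) waveform in [-1, 1]
writer.add_audio("val_wav_target", audio, global_step=0, sample_rate=22050)
mel_img = np.random.rand(80, 100, 3)               # HWC, matching dataformats
writer.add_image("val_mel_target", mel_img, global_step=0, dataformats="HWC")
writer.close()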
-0500 Subject: [PATCH 020/244] Fix imports for VITS --- nemo/collections/tts/models/vits.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 57ed7de0968d..f4a7464e8f8c 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -37,6 +37,16 @@ from nemo.collections.tts.losses.vits_losses import DiscriminatorLoss, FeatureLoss, GeneratorLoss, KlLoss import nemo.collections.tts.modules.vits_modules as modules from nemo.collections.tts.modules.vits_modules import init_weights, get_padding, SynthesizerTrn, MultiPeriodDiscriminator +<<<<<<< HEAD +======= + + +HAVE_WANDB = True +try: + import wandb +except ModuleNotFoundError: + HAVE_WANDB = False +>>>>>>> 8ddb3dfb5... Fix all imports @dataclass class VitsConfig: From f32cbe5a2fb1b3c4528bf4adc20e048caca9f7f5 Mon Sep 17 00:00:00 2001 From: jasonjjl1999 Date: Thu, 11 Nov 2021 15:33:16 -0500 Subject: [PATCH 021/244] Remove old module calls --- nemo/collections/tts/modules/vits_modules.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index 50f7b7fe0821..a85b72bf577c 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -25,7 +25,7 @@ from nemo.collections.common.parts.preprocessing import parsers from nemo.collections.tts.helpers.helpers import plot_spectrogram_to_numpy, regulate_len from nemo.collections.tts.models.base import TextToWaveform -from nemo.collections.tts.modules.monotonic_align import maximum_path +from nemo.collections.tts.monotonic_align import maximum_path from nemo.core.classes.common import PretrainedModelInfo, typecheck from nemo.core.neural_types.elements import ( MelSpectrogramType, From 6cad1bd46db04cb340f816c95928e3f6e81df7fe Mon Sep 17 00:00:00 2001 From: jasonjjl1999 Date: Thu, 11 Nov 2021 15:36:33 -0500 Subject: [PATCH 022/244] Fix typo in monotonic align import --- nemo/collections/tts/modules/vits_modules.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index a85b72bf577c..50f7b7fe0821 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -25,7 +25,7 @@ from nemo.collections.common.parts.preprocessing import parsers from nemo.collections.tts.helpers.helpers import plot_spectrogram_to_numpy, regulate_len from nemo.collections.tts.models.base import TextToWaveform -from nemo.collections.tts.monotonic_align import maximum_path +from nemo.collections.tts.modules.monotonic_align import maximum_path from nemo.core.classes.common import PretrainedModelInfo, typecheck from nemo.core.neural_types.elements import ( MelSpectrogramType, From dc38284d8b61ae37031ef29fb0a3bac23aa02bec Mon Sep 17 00:00:00 2001 From: "richa.ren@mail.utoronto.ca" Date: Tue, 16 Nov 2021 18:28:59 -0500 Subject: [PATCH 023/244] Modified validation step 1. reverted to tensorboard 2. validation_step logs audio, mel-spec for batch 0 3. 
validation_step_alt logs audio, mel-spec for batch 0 and loss_mel --- nemo/collections/tts/models/vits.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index f4a7464e8f8c..ac7907f95379 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -40,6 +40,7 @@ <<<<<<< HEAD ======= +<<<<<<< HEAD HAVE_WANDB = True try: @@ -48,6 +49,8 @@ HAVE_WANDB = False >>>>>>> 8ddb3dfb5... Fix all imports +======= +>>>>>>> e8f520f47... Modified validation step @dataclass class VitsConfig: parser: Dict[Any, Any] = MISSING @@ -268,6 +271,24 @@ def validation_step(self, batch, batch_idx): self.global_step, sample_rate=self.sample_rate, ) +<<<<<<< HEAD + + self.logger.experiment.add_image( + "val_mel_target", + plot_spectrogram_to_numpy(mel[0, :, : mel_lengths[0]].cpu().numpy()), + self.global_step, + dataformats="HWC", + ) + + self.logger.experiment.add_image( + "val_mel_predicted", + plot_spectrogram_to_numpy(y_hat_mel[0, :, : y_hat_mel_lengths[0]].cpu().numpy()), + self.global_step, + dataformats="HWC", + ) + + +======= self.logger.experiment.add_image( "val_mel_target", @@ -284,6 +305,7 @@ def validation_step(self, batch, batch_idx): ) +>>>>>>> e8f520f47... Modified validation step def validation_step_alt(self, batch, batch_idx): (x, x_lengths, spec, spec_lengths, y, y_lengths) = batch From 3315eb923353c187e0299f163f50ac19c7561d6a Mon Sep 17 00:00:00 2001 From: jasonjjl1999 Date: Tue, 16 Nov 2021 18:53:20 -0500 Subject: [PATCH 024/244] Add parameters from original VITS config --- examples/tts/conf/vits.yaml | 99 ++++++++++++++++++++++------- examples/tts/vits.py | 4 +- nemo/collections/tts/models/vits.py | 7 +- 3 files changed, 84 insertions(+), 26 deletions(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index 10cb3389a3ba..3da7555eb780 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -40,30 +40,85 @@ model: mel_fmax: null train_ds: - manifest_filepath: ${train_dataset} - max_duration: null - min_duration: 0.1 - sample_rate: ${model.sample_rate} - trim: false - parser: null - drop_last: true - shuffle: true - batch_size: 64 - num_workers: 12 - max_wav_value: 32768.0 - filter_length: 1024 + dataset: + _target_: "nemo.collections.tts.torch.data.MixerTTSDataset" + manifest_filepath: ${train_dataset} + sample_rate: ${sample_rate} + sup_data_path: ${sup_data_path} + sup_data_types: ${sup_data_types} + n_fft: ${n_fft} + win_length: ${n_window_size} + hop_length: ${n_window_stride} + window: ${window} + n_mels: ${n_mels} + lowfreq: ${lowfreq} + highfreq: ${highfreq} + max_duration: null + min_duration: 0.1 + ignore_file: null + trim: False + pitch_fmin: ${pitch_fmin} + pitch_fmax: ${pitch_fmax} + + text_tokenizer: + _target_: "nemo.collections.tts.torch.tts_tokenizers.EnglishPhonemesTokenizer" + punct: True + stresses: True + chars: True + space: ' ' + silence: null + apostrophe: True + sep: '|' + add_blank_at: null + pad_with_space: True + g2p: + _target_: "nemo.collections.tts.torch.g2ps.EnglishG2p" + phoneme_dict: ${phoneme_dict_path} + heteronyms: ${heteronyms_path} + dataloader_params: + drop_last: false + shuffle: true + batch_size: 64 + num_workers: 4 + pin_memory: false validation_ds: - manifest_filepath: ${validation_datasets} - sample_rate: ${model.sample_rate} - trim: false - parser: null - drop_last: false - shuffle: false - batch_size: 64 - num_workers: 8 - max_wav_value: 32768.0 - filter_length: 1024 + dataset: 
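A dataset/dataloader_params block like the one above is typically materialized with hydra's instantiate plus a plain DataLoader; a sketch under that assumption (make_loader is an illustrative name, mirroring the _loader helper in vits.py):

import torch
from hydra.utils import instantiate

def make_loader(cfg):
    dataset = instantiate(cfg.dataset)             # builds the _target_ dataset
    return torch.utils.data.DataLoader(
        dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params)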
+ _target_: "nemo.collections.tts.torch.data.MixerTTSDataset" + manifest_filepath: ${validation_datasets} + sample_rate: ${sample_rate} + sup_data_path: ${sup_data_path} + sup_data_types: ${sup_data_types} + n_fft: ${n_fft} + win_length: ${n_window_size} + hop_length: ${n_window_stride} + window: ${window} + n_mels: ${n_mels} + lowfreq: ${lowfreq} + highfreq: ${highfreq} + max_duration: null + min_duration: 0.1 + ignore_file: null + trim: False + pitch_fmin: ${pitch_fmin} + pitch_fmax: ${pitch_fmax} + + text_tokenizer: + _target_: "nemo.collections.tts.torch.tts_tokenizers.EnglishPhonemesTokenizer" + punct: True + stresses: True + chars: True + space: ' ' + silence: null + apostrophe: True + sep: '|' + add_blank_at: null + pad_with_space: True + g2p: + _target_: "nemo.collections.tts.torch.g2ps.EnglishG2p" + phoneme_dict: ${phoneme_dict_path} + heteronyms: ${heteronyms_path} + preprocessor: _target_: nemo.collections.asr.parts.preprocessing.features.FilterbankFeatures diff --git a/examples/tts/vits.py b/examples/tts/vits.py index 9000f4d696c8..56c48c331840 100644 --- a/examples/tts/vits.py +++ b/examples/tts/vits.py @@ -16,7 +16,7 @@ from pytorch_lightning.plugins import DDPPlugin from nemo.collections.common.callbacks import LogEpochTimeCallback -from nemo.collections.tts.models.vits import Vits +from nemo.collections.tts.models.vits import VitsModel from nemo.core.config import hydra_runner from nemo.utils.exp_manager import exp_manager @@ -25,7 +25,7 @@ def main(cfg): trainer = pl.Trainer(**cfg.trainer) exp_manager(trainer, cfg.get("exp_manager", None)) - model = Vits(cfg=cfg.model, trainer=trainer) + model = VitsModel(cfg=cfg.model, trainer=trainer) lr_logger = pl.callbacks.LearningRateMonitor() epoch_time_logger = LogEpochTimeCallback() trainer.callbacks.extend([lr_logger, epoch_time_logger]) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index ac7907f95379..843202484815 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -63,6 +63,7 @@ class VitsConfig: class VitsModel(TextToWaveform): def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): + if isinstance(cfg, dict): cfg = OmegaConf.create(cfg) @@ -75,7 +76,7 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): abbreviation_version="fastpitch", make_table=False, ) - + super().__init__(cfg=cfg, trainer=trainer) schema = OmegaConf.structured(VitsConfig) @@ -384,7 +385,8 @@ def validation_step_alt(self, batch, batch_idx): @staticmethod def _loader(cfg): try: - _ = cfg.dataset.manifest_filepath + # _ = cfg.model.train_ds.manifest_filepath + _ = cfg['manifest_filepath'] except omegaconf.errors.MissingMandatoryValue: logging.warning("manifest_filepath was skipped. 
No dataset for this model.") return None @@ -393,6 +395,7 @@ def _loader(cfg): return torch.utils.data.DataLoader( # noqa dataset=dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params, ) + def setup_training_data(self, cfg): self._train_dl = self._loader(cfg) From 75a03116d2f6a958571e13865c171ff87e671922 Mon Sep 17 00:00:00 2001 From: jasonjjl1999 Date: Tue, 16 Nov 2021 20:27:40 -0500 Subject: [PATCH 025/244] Fix config file --- examples/tts/conf/vits.yaml | 55 ++++++++++--- nemo/collections/tts/models/vits.py | 115 ++++++++++++++++------------ 2 files changed, 111 insertions(+), 59 deletions(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index 3da7555eb780..f2966a209b82 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -5,6 +5,35 @@ labels: [' ', '!', "'", '(', ')', ',', '-', '.', ':', ';', '?', 'a', 'b', 'c', ' train_dataset: ??? validation_datasets: ??? test_datasets: null +sample_rate: 22050 +sup_data_path: null +sup_data_types: null + +# LJSpeech stats (per frame), train +pitch_mean: 212.35873413085938 +pitch_std: 68.52806091308594 + +# default values from librosa.pyin +pitch_fmin: 65.40639132514966 +pitch_fmax: 2093.004522404789 + +# default values for sample_rate=22050 +n_mels: 80 +n_window_size: 1024 +n_window_stride: 256 +n_fft: 1024 +lowfreq: 0 +highfreq: 8000 +window: "hann" + +pitch_loss_scale: 0.1 +durs_loss_scale: 0.1 +mel_loss_scale: 1.0 + +phoneme_dict_path: null # "scripts/tts_dataset_files/cmudict-0.7b-030921" +heteronyms_path: null # "scripts/tts_dataset_files/heteronyms-030921" + +filter_length: 768 model: sample_rate: 22050 @@ -31,10 +60,11 @@ model: c_kl: 1. inter_channels: 192 hidden_channels: 192 - filter_channels: 768 + filter_channels: ${filter_length} n_heads: 2 p_dropout: 0.1 n_layers_q: 3 + n_layers: 6 use_spectral_norm: false mel_fmin: 0.0 mel_fmax: null @@ -119,6 +149,13 @@ model: phoneme_dict: ${phoneme_dict_path} heteronyms: ${heteronyms_path} + dataloader_params: + drop_last: false + shuffle: true + batch_size: 64 + num_workers: 1 + pin_memory: false + preprocessor: _target_: nemo.collections.asr.parts.preprocessing.features.FilterbankFeatures @@ -172,19 +209,19 @@ model: duration_predictor: _target_: nemo.collections.tts.modules.vits_modules.StochasticDurationPredictor - input_size: ${model.symbols_embedding_dim} + in_channels: ${model.symbols_embedding_dim} # input_size: ${model.symbols_embedding_dim} kernel_size: 3 - filter_size: 256 - dropout: 0.1 - n_layers: 6 + filter_channels: ${filter_length} # filter_size: 256 + p_dropout: 0.1 # dropout: 0.1 + # n_layers: 6 pitch_predictor: _target_: nemo.collections.tts.modules.fastpitch.TemporalPredictor input_size: ${model.symbols_embedding_dim} kernel_size: 3 - filter_size: 256 + filter_size: ${filter_length} dropout: 0.1 - n_layers: 6 + n_layers: ${model.n_layers} generator: _target_: nemo.collections.tts.modules.vits_modules.Generator @@ -194,7 +231,7 @@ model: upsample_rates: [8,8,2,2] upsample_initial_channel: 512 upsample_kernel_sizes: [16,16,4,4] - initial_input_size: 384 + initial_channel: 384 # initial_input_size: 384 trainer: gpus: -1 # number of gpus @@ -204,7 +241,7 @@ trainer: accumulate_grad_batches: 1 checkpoint_callback: False # Provided by exp_manager logger: False # Provided by exp_manager - gradient_clip_val: 1000.0 + # gradient_clip_val: 1000.0 flush_logs_every_n_steps: 1000 log_every_n_steps: 100 check_val_every_n_epoch: 5 diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 
843202484815..e75ffdb79f20 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -126,11 +126,26 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): self.hop_size = cfg.hop_size self.net_g = SynthesizerTrn( - # len(symbols), - self.hps.data.filter_length // 2 + 1, - self.hps.train.segment_size // self.hps.data.hop_length, - **self.hps.model) - self.net_d = MultiPeriodDiscriminator(self.hps.model.use_spectral_norm) + n_vocab = cfg.symbols_embedding_dim, + spec_channels = cfg.n_mel_channels, + segment_size = cfg.segment_size, + inter_channels = cfg.inter_channels, + hidden_channels = cfg.hidden_channels, + filter_channels = cfg.filter_channels, + n_heads = cfg.n_heads, + n_layers = cfg.n_layers, + kernel_size = cfg.pitch_embedding_kernel_size, + p_dropout = cfg.p_dropout, + resblock = cfg.generator.resblock, + resblock_kernel_sizes = cfg.generator.resblock_kernel_sizes, + resblock_dilation_sizes = cfg.generator.resblock_dilation_sizes, + upsample_rates = cfg.generator.upsample_rates, + upsample_initial_channel = cfg.generator.upsample_initial_channel, + upsample_kernel_sizes = cfg.generator.upsample_kernel_sizes, + # cfg.filter_channels // 2 + 1, + # cfg.segment_size // cfg.train_ds.dataset.hop_length, + ) + self.net_d = MultiPeriodDiscriminator(cfg.use_spectral_norm) self.automatic_optimization = False def parse(self, str_input: str) -> torch.tensor: @@ -140,21 +155,21 @@ def parse(self, str_input: str) -> torch.tensor: def configure_optimizers(self): self.optim_g = torch.optim.AdamW( self.net_g.parameters(), - self._cfg.model.lr, - betas=self._cfg.model.betas, - eps=self._cfg.model.eps) + self._cfg.lr, + betas=self._cfg.betas, + eps=self._cfg.eps) self.optim_d = torch.optim.AdamW( self.net_d.parameters(), - self._cfg.model.lr, - betas=self._cfg.model.betas, - eps=self._cfg.model.eps) + self._cfg.lr, + betas=self._cfg.betas, + eps=self._cfg.eps) - scheduler_g = torch.optim.lr_scheduler.ExponentialLR(self.optim_g, gamma=self._cfg.model.lr_decay) + scheduler_g = torch.optim.lr_scheduler.ExponentialLR(self.optim_g, gamma=self._cfg.lr_decay) scheduler_g_dict = { 'scheduler': scheduler_g, 'interval': 'step', } - scheduler_d = torch.optim.lr_scheduler.ExponentialLR(self.optim_d, gamma=self._cfg.model.lr_decay) + scheduler_d = torch.optim.lr_scheduler.ExponentialLR(self.optim_d, gamma=self._cfg.lr_decay) scheduler_d_dict = { 'scheduler': scheduler_d, 'interval': 'step' @@ -170,7 +185,7 @@ def forward(self, batch, batch_idx): x_lengths = x_lengths[:1] y_hat, attn, mask, *_ = self.net_g.module.infer(x, x_lengths, max_len=1000) - y_hat_lengths = mask.sum([1, 2]).long() * self._cfg.model.hop_size + y_hat_lengths = mask.sum([1, 2]).long() * self._cfg.hop_size return y_hat, y_lengths @@ -182,24 +197,24 @@ def training_step(self, batch, batch_idx): (z, z_p, m_p, logs_p, m_q, logs_q) = self.net_g(x, x_lengths, spec, spec_lengths) mel = modules.spec_to_mel_torch( spec, - self._cfg.model.train_ds.filter_length, - self._cfg.model.n_mel_channels, - self._cfg.model.sample_rate, - self._cfg.model.mel_fmin, - self._cfg.model.mel_fmax + self._cfg.train_ds.filter_length, + self._cfg.n_mel_channels, + self._cfg.sample_rate, + self._cfg.mel_fmin, + self._cfg.mel_fmax ) - y_mel = modules.slice_segments(mel, ids_slice, self._cfg.model.segment_size // self._cfg.model.hop_size) + y_mel = modules.slice_segments(mel, ids_slice, self._cfg.segment_size // self._cfg.hop_size) y_hat_mel = modules.mel_spectrogram_torch( y_hat.squeeze(1), - 
self._cfg.model.train_ds.filter_length, - self._cfg.model.n_mel_channels, - self._cfg.model.sample_rate, - self._cfg.model.hop_size, - self._cfg.model.preprocessing.n_window_size, - self._cfg.model.mel_fmin, - self._cfg.model.mel_fmax + self._cfg.train_ds.filter_length, + self._cfg.n_mel_channels, + self._cfg.sample_rate, + self._cfg.hop_size, + self._cfg.preprocessing.n_window_size, + self._cfg.mel_fmin, + self._cfg.mel_fmax ) - y = modules.slice_segments(y, ids_slice * self._cfg.model.hop_size, self._cfg.model.segment_size) # slice + y = modules.slice_segments(y, ids_slice * self._cfg.hop_size, self._cfg.segment_size) # slice y_d_hat_r, y_d_hat_g, _, _ = self.net_d(y, y_hat.detach()) loss_disc, losses_disc_r, losses_disc_g = self.disc_loss(y_d_hat_r, y_d_hat_g) loss_disc_all = loss_disc @@ -215,8 +230,8 @@ def training_step(self, batch, batch_idx): y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = self.net_d(y, y_hat) with autocast(enabled=False): loss_dur = torch.sum(l_length.float()) - loss_mel = F.l1_loss(y_mel, y_hat_mel) * self._cfg.model.c_mel - loss_kl = self.kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * self._cfg.model.c_kl + loss_mel = F.l1_loss(y_mel, y_hat_mel) * self._cfg.c_mel + loss_kl = self.kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * self._cfg.c_kl loss_fm = self.feat_matching_loss(fmap_r, fmap_g) loss_gen, losses_gen = self.gen_loss(y_d_hat_g) @@ -353,40 +368,40 @@ def validation_step_alt(self, batch, batch_idx): (z, z_p, m_p, logs_p, m_q, logs_q) = self.net_g(x, x_lengths, spec, spec_lengths) mel = modules.spec_to_mel_torch( spec, - self._cfg.model.train_ds.filter_length, - self._cfg.model.n_mel_channels, - self._cfg.model.sample_rate, - self._cfg.model.mel_fmin, - self._cfg.model.mel_fmax + self._cfg.train_ds.filter_length, + self._cfg.n_mel_channels, + self._cfg.sample_rate, + self._cfg.mel_fmin, + self._cfg.mel_fmax ) mel = modules.spec_to_mel_torch( spec, - self._cfg.model.train_ds.filter_length, - self._cfg.model.n_mel_channels, - self._cfg.model.sample_rate, - self._cfg.model.mel_fmin, - self._cfg.model.mel_fmax + self._cfg.train_ds.filter_length, + self._cfg.n_mel_channels, + self._cfg.sample_rate, + self._cfg.mel_fmin, + self._cfg.mel_fmax ) - y_mel = modules.slice_segments(mel, ids_slice, self._cfg.model.segment_size // self._cfg.model.hop_size) + y_mel = modules.slice_segments(mel, ids_slice, self._cfg.segment_size // self._cfg.hop_size) y_hat_mel = modules.mel_spectrogram_torch( y_hat.squeeze(1), - self._cfg.model.train_ds.filter_length, - self._cfg.model.n_mel_channels, - self._cfg.model.sample_rate, - self._cfg.model.hop_size, - self._cfg.model.preprocessing.n_window_size, - self._cfg.model.mel_fmin, - self._cfg.model.mel_fmax + self._cfg.train_ds.filter_length, + self._cfg.n_mel_channels, + self._cfg.sample_rate, + self._cfg.hop_size, + self._cfg.preprocessing.n_window_size, + self._cfg.mel_fmin, + self._cfg.mel_fmax ) - loss_mel = F.l1_loss(y_mel, y_hat_mel) * self._cfg.model.c_mel + loss_mel = F.l1_loss(y_mel, y_hat_mel) * self._cfg.c_mel self.log_dict({"val_loss * c_mel": loss_mel}, on_epoch=True, sync_dist=True) @staticmethod def _loader(cfg): try: # _ = cfg.model.train_ds.manifest_filepath - _ = cfg['manifest_filepath'] + _ = cfg['dataset']['manifest_filepath'] except omegaconf.errors.MissingMandatoryValue: logging.warning("manifest_filepath was skipped. 
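slice_segments, used above to align the generated window with the ground-truth mel, just gathers a fixed-size window per batch element at the given start frames; an illustrative (loop-based, not the module's vectorized) re-implementation:

import torch

def slice_segments_sketch(x, ids_str, segment_size):
    out = torch.zeros(x.size(0), x.size(1), segment_size, dtype=x.dtype)
    for i in range(x.size(0)):
        s = int(ids_str[i])
        out[i] = x[i, :, s:s + segment_size]
    return out

x = torch.arange(20, dtype=torch.float32).view(2, 1, 10)
print(slice_segments_sketch(x, torch.tensor([2, 5]), 4))
# tensor([[[ 2.,  3.,  4.,  5.]], [[15., 16., 17., 18.]]])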
No dataset for this model.") return None From 5e5c2f391a64d2eb4b9e25f440b487c34b4d0ac2 Mon Sep 17 00:00:00 2001 From: "richa.ren@mail.utoronto.ca" Date: Tue, 23 Nov 2021 12:28:46 -0500 Subject: [PATCH 026/244] Fix imports and generate spec from audio --- nemo/collections/tts/models/vits.py | 76 ++++++------ .../tts/modules/vits_mel_processing.py | 112 ++++++++++++++++++ nemo/collections/tts/modules/vits_modules.py | 44 ++----- 3 files changed, 161 insertions(+), 71 deletions(-) create mode 100644 nemo/collections/tts/modules/vits_mel_processing.py diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index e75ffdb79f20..1eda051cb6a3 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -1,56 +1,30 @@ from dataclasses import dataclass -from itertools import chain from typing import Any, Dict -import numpy as np +import omegaconf import torch from hydra.utils import instantiate -import omegaconf from omegaconf import MISSING, DictConfig, OmegaConf from pytorch_lightning import Trainer -from pytorch_lightning.loggers import LoggerCollection, TensorBoardLogger, WandbLogger -import torch -from torch import nn -from torch.nn import functional as F - from torch.cuda.amp import autocast +from torch.nn import functional as F -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm - -from nemo.collections.asr.data.audio_to_text import FastPitchDataset +import nemo.collections.tts.modules.vits_modules as modules from nemo.collections.common.parts.preprocessing import parsers -from nemo.collections.tts.helpers.helpers import plot_spectrogram_to_numpy, regulate_len +from nemo.collections.tts.helpers.helpers import plot_spectrogram_to_numpy +from nemo.collections.tts.losses.vits_losses import DiscriminatorLoss, FeatureLoss, GeneratorLoss, KlLoss from nemo.collections.tts.models.base import TextToWaveform -from nemo.core.classes.common import PretrainedModelInfo, typecheck -from nemo.core.neural_types.elements import ( - MelSpectrogramType, - RegressionValuesType, - TokenDurationType, - TokenIndex, - TokenLogDurationType, -) -from nemo.core.neural_types.neural_type import NeuralType -from nemo.core.optim.lr_scheduler import NoamAnnealing +from nemo.collections.tts.modules.vits_modules import SynthesizerTrn, MultiPeriodDiscriminator +from nemo.collections.tts.torch.data import TTSDataset +from nemo.core.classes.common import PretrainedModelInfo from nemo.utils import logging +<<<<<<< HEAD from nemo.collections.tts.models.base import TextToWaveform from nemo.collections.tts.losses.vits_losses import DiscriminatorLoss, FeatureLoss, GeneratorLoss, KlLoss import nemo.collections.tts.modules.vits_modules as modules from nemo.collections.tts.modules.vits_modules import init_weights, get_padding, SynthesizerTrn, MultiPeriodDiscriminator -<<<<<<< HEAD -======= -<<<<<<< HEAD - -HAVE_WANDB = True -try: - import wandb -except ModuleNotFoundError: - HAVE_WANDB = False ->>>>>>> 8ddb3dfb5... Fix all imports -======= ->>>>>>> e8f520f47... 
Modified validation step @dataclass class VitsConfig: parser: Dict[Any, Any] = MISSING @@ -148,6 +122,22 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): self.net_d = MultiPeriodDiscriminator(cfg.use_spectral_norm) self.automatic_optimization = False + window_fn = { + 'hann': torch.hann_window, + 'hamming': torch.hamming_window, + 'blackman': torch.blackman_window, + 'bartlett': torch.bartlett_window, + 'none': None, + }.get(self.window, None) + + self.stft = lambda x: torch.stft( + input=x, + n_fft=self.n_fft, + hop_length=self.hop_len, + win_length=self.win_length, + window=window_fn(self.win_length, periodic=False).to(torch.float) if window_fn else None, + ) + def parse(self, str_input: str) -> torch.tensor: # TODO: Implement pass @@ -189,8 +179,20 @@ def forward(self, batch, batch_idx): return y_hat, y_lengths + + def get_spec(self, audio): + with torch.cuda.amp.autocast(enabled=False): + spec = self.stft(audio) + if spec.dtype in [torch.cfloat, torch.cdouble]: + spec = torch.view_as_real(spec) + spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-9) + return spec + def training_step(self, batch, batch_idx): - (x, x_lengths, spec, spec_lengths, y, y_lengths) = batch + (x, x_lengths, y, y_lengths) = batch + + spec = self.get_spec(y) + spec_lengths = torch.ones(spec.shape[0]) * spec.shape[2] with autocast(enabled=False): y_hat, l_length, attn, ids_slice, x_mask, z_mask, \ @@ -263,7 +265,7 @@ def training_step(self, batch, batch_idx): self.log_dict(metrics, on_step=True, sync_dist=True) def validation_step(self, batch, batch_idx): - (x, x_lengths, spec, spec_lengths, y, y_lengths) = batch + (x, x_lengths, y, y_lengths) = batch y_hat, attn, mask, *_ = self.net_g.module.infer(x, x_lengths, max_len=1000) y_hat_lengths = mask.sum([1, 2]).long() * self.hps.data.hop_length diff --git a/nemo/collections/tts/modules/vits_mel_processing.py b/nemo/collections/tts/modules/vits_mel_processing.py new file mode 100644 index 000000000000..de7b5f1c8ce6 --- /dev/null +++ b/nemo/collections/tts/modules/vits_mel_processing.py @@ -0,0 +1,112 @@ +import math +import os +import random +import torch +from torch import nn +import torch.nn.functional as F +import torch.utils.data +import numpy as np +import librosa +import librosa.util as librosa_util +from librosa.util import normalize, pad_center, tiny +from scipy.signal import get_window +from scipy.io.wavfile import read +from librosa.filters import mel as librosa_mel_fn + +MAX_WAV_VALUE = 32768.0 + + +def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): + """ + PARAMS + ------ + C: compression factor + """ + return torch.log(torch.clamp(x, min=clip_val) * C) + + +def dynamic_range_decompression_torch(x, C=1): + """ + PARAMS + ------ + C: compression factor used to compress + """ + return torch.exp(x) / C + + +def spectral_normalize_torch(magnitudes): + output = dynamic_range_compression_torch(magnitudes) + return output + + +def spectral_de_normalize_torch(magnitudes): + output = dynamic_range_decompression_torch(magnitudes) + return output + + +mel_basis = {} +hann_window = {} + + +def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): + if torch.min(y) < -1.: + print('min value is ', torch.min(y)) + if torch.max(y) > 1.: + print('max value is ', torch.max(y)) + + global hann_window + dtype_device = str(y.dtype) + '_' + str(y.device) + wnsize_dtype_device = str(win_size) + '_' + dtype_device + if wnsize_dtype_device not in hann_window: + hann_window[wnsize_dtype_device] = 
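The stft lambda and get_spec above produce a linear magnitude spectrogram with n_fft // 2 + 1 frequency bins; a standalone check (shown with modern torch's return_complex=True rather than the view_as_real path; constants are illustrative):

import torch

y = torch.randn(1, 22050)
spec = torch.stft(y, n_fft=1024, hop_length=256, win_length=1024,
                  window=torch.hann_window(1024), return_complex=True)
print(spec.shape[1])                               # 513 == 1024 // 2 + 1
mag = torch.sqrt(spec.abs().pow(2) + 1e-9)         # magnitude, as in get_spec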
torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) + + y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') + y = y.squeeze(1) + + spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], + center=center, pad_mode='reflect', normalized=False, onesided=True) + + spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) + return spec + + +def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): + global mel_basis + dtype_device = str(spec.dtype) + '_' + str(spec.device) + fmax_dtype_device = str(fmax) + '_' + dtype_device + if fmax_dtype_device not in mel_basis: + mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) + mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) + spec = torch.matmul(mel_basis[fmax_dtype_device], spec) + spec = spectral_normalize_torch(spec) + return spec + + +def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): + if torch.min(y) < -1.: + print('min value is ', torch.min(y)) + if torch.max(y) > 1.: + print('max value is ', torch.max(y)) + + global mel_basis, hann_window + dtype_device = str(y.dtype) + '_' + str(y.device) + fmax_dtype_device = str(fmax) + '_' + dtype_device + wnsize_dtype_device = str(win_size) + '_' + dtype_device + if fmax_dtype_device not in mel_basis: + mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) + mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) + if wnsize_dtype_device not in hann_window: + hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) + + y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') + y = y.squeeze(1) + + spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], + center=center, pad_mode='reflect', normalized=False, onesided=True) + + spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) + + spec = torch.matmul(mel_basis[fmax_dtype_device], spec) + spec = spectral_normalize_torch(spec) + + return spec \ No newline at end of file diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index 50f7b7fe0821..106ecdbf828d 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -1,44 +1,16 @@ -import copy +import numpy as np import math + import numpy as np -import scipy import torch from torch import nn +from torch.nn import Conv1d, ConvTranspose1d, Conv2d from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm -from dataclasses import dataclass -from itertools import chain -from typing import Any, Dict - -from hydra.utils import instantiate -from omegaconf import MISSING, DictConfig, OmegaConf -from pytorch_lightning import Trainer -from pytorch_lightning.loggers import LoggerCollection, TensorBoardLogger -import math - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from nemo.collections.asr.data.audio_to_text import FastPitchDataset -from nemo.collections.common.parts.preprocessing import parsers -from nemo.collections.tts.helpers.helpers import plot_spectrogram_to_numpy, regulate_len -from 
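spec_to_mel_torch above is a cached mel-filterbank matmul followed by dynamic-range compression; the core computation without the cache (librosa filterbank; sample rate and band constants are illustrative):

import torch
from librosa.filters import mel as librosa_mel_fn

spec = torch.rand(1, 513, 100)                     # [batch, n_fft//2+1, frames]
mel_fb = torch.from_numpy(
    librosa_mel_fn(sr=22050, n_fft=1024, n_mels=80, fmin=0, fmax=8000)).float()
mel = torch.matmul(mel_fb, spec)                   # [1, 80, 100]
log_mel = torch.log(torch.clamp(mel, min=1e-5))    # dynamic_range_compression
print(log_mel.shape)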
nemo.collections.tts.models.base import TextToWaveform from nemo.collections.tts.modules.monotonic_align import maximum_path -from nemo.core.classes.common import PretrainedModelInfo, typecheck -from nemo.core.neural_types.elements import ( - MelSpectrogramType, - RegressionValuesType, - TokenDurationType, - TokenIndex, - TokenLogDurationType, -) -from nemo.core.neural_types.neural_type import NeuralType -from nemo.core.optim.lr_scheduler import NoamAnnealing -from nemo.utils import logging -from nemo.collections.tts.models.base import TextToWaveform -from nemo.collections.tts.losses.vits_losses import DiscriminatorLoss, FeatureLoss, GeneratorLoss, KlLoss +from nemo.collections.tts.modules.vits_mel_processing import librosa_mel_fn, spectral_normalize_torch + #from nemo.collections.tts.vits_modules import MultiPeriodDiscriminator, MultiScaleDiscriminator LRELU_SLOPE = 0.1 @@ -1210,12 +1182,16 @@ def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_s self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) self.norm_layers_2.append(LayerNorm(hidden_channels)) + def subsequent_mask(length): + mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) + return mask + def forward(self, x, x_mask, h, h_mask): """ x: decoder input h: encoder output """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) + self_attn_mask = self.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) x = x * x_mask for i in range(self.n_layers): From d1443d07370d6444086346b27fcd875e8298464c Mon Sep 17 00:00:00 2001 From: jasonjjl1999 Date: Tue, 23 Nov 2021 19:15:45 -0500 Subject: [PATCH 027/244] Fix incorrect dimensions --- nemo/collections/tts/models/vits.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 1eda051cb6a3..f05ed81e171a 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -128,7 +128,7 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): 'blackman': torch.blackman_window, 'bartlett': torch.bartlett_window, 'none': None, - }.get(self.window, None) + }.get(self.hann_window, None) self.stft = lambda x: torch.stft( input=x, @@ -265,10 +265,12 @@ def training_step(self, batch, batch_idx): self.log_dict(metrics, on_step=True, sync_dist=True) def validation_step(self, batch, batch_idx): + # (x, x_lengths, spec, spec_lengths, y, y_lengths) = batch (x, x_lengths, y, y_lengths) = batch - y_hat, attn, mask, *_ = self.net_g.module.infer(x, x_lengths, max_len=1000) - y_hat_lengths = mask.sum([1, 2]).long() * self.hps.data.hop_length + + y_hat, attn, mask, *_ = self.net_g.infer(y, y_lengths, max_len=1000) + y_hat_lengths = mask.sum([1, 2]).long() * self._cfg.train_ds.dataset.hop_length # Note to modify the functions / use the ones in NeMo, we need the lengths mel, mel_lengths = self.audio_to_melspec_precessor(x, x_lengths) @@ -321,7 +323,7 @@ def validation_step(self, batch, batch_idx): self.global_step, dataformats="HWC", ) - + >>>>>>> e8f520f47... 
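The subsequent_mask added above is the standard causal mask: a lower-triangular matrix broadcast over batch and heads, so position t may only attend to positions <= t. For example:

import torch

length = 4
mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0)
print(mask[0, 0])
# tensor([[1., 0., 0., 0.],
#         [1., 1., 0., 0.],
#         [1., 1., 1., 0.],
#         [1., 1., 1., 1.]])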
Modified validation step def validation_step_alt(self, batch, batch_idx): From d030ccd79f7fee057a32e86d4461848ce7d1224e Mon Sep 17 00:00:00 2001 From: jasonjjl1999 Date: Tue, 23 Nov 2021 22:10:09 -0500 Subject: [PATCH 028/244] Progress update --- examples/tts/conf/vits.yaml | 16 +++++----- nemo/collections/tts/losses/vits_losses.py | 4 +-- nemo/collections/tts/models/vits.py | 32 ++++++++++++-------- nemo/collections/tts/modules/vits_modules.py | 5 ++- 4 files changed, 35 insertions(+), 22 deletions(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index f2966a209b82..348db1aab6d1 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -33,7 +33,8 @@ mel_loss_scale: 1.0 phoneme_dict_path: null # "scripts/tts_dataset_files/cmudict-0.7b-030921" heteronyms_path: null # "scripts/tts_dataset_files/heteronyms-030921" -filter_length: 768 +filter_channels: 768 +filter_length: 1024 model: sample_rate: 22050 @@ -43,7 +44,7 @@ model: n_speakers: 1 symbols_embedding_dim: 384 max_token_duration: 75 - n_mel_channels: 80 + n_mel_channels: ${n_mels} pitch_embedding_kernel_size: 3 mel_loss_coeff: 40 hop_size: 256 @@ -60,7 +61,8 @@ model: c_kl: 1. inter_channels: 192 hidden_channels: 192 - filter_channels: ${filter_length} + filter_channels: ${filter_channels} + filter_length: ${filter_length} n_heads: 2 p_dropout: 0.1 n_layers_q: 3 @@ -108,7 +110,7 @@ model: dataloader_params: drop_last: false shuffle: true - batch_size: 64 + batch_size: 2 num_workers: 4 pin_memory: false @@ -152,7 +154,7 @@ model: dataloader_params: drop_last: false shuffle: true - batch_size: 64 + batch_size: 2 num_workers: 1 pin_memory: false @@ -169,7 +171,7 @@ model: lowfreq: 0 mag_power: 1.0 n_fft: 1024 - n_window_size: 1024 + n_window_size: ${n_window_size} n_window_stride: ${model.hop_size} normalize: null pad_to: 1 @@ -225,7 +227,7 @@ model: generator: _target_: nemo.collections.tts.modules.vits_modules.Generator - resblock: 1 + resblock: "1" resblock_kernel_sizes: [3,7,11] resblock_dilation_sizes: [[1,3,5], [1,3,5], [1,3,5]] upsample_rates: [8,8,2,2] diff --git a/nemo/collections/tts/losses/vits_losses.py b/nemo/collections/tts/losses/vits_losses.py index 330cbbe44c2b..e2c197350732 100644 --- a/nemo/collections/tts/losses/vits_losses.py +++ b/nemo/collections/tts/losses/vits_losses.py @@ -47,7 +47,7 @@ def output_types(self): "fake_losses": [NeuralType(elements_type=LossType())], } - def forward(disc_real_outputs, disc_generated_outputs): + def forward(self, disc_real_outputs, disc_generated_outputs): loss = 0 r_losses = [] g_losses = [] @@ -109,7 +109,7 @@ def output_types(self): } @typecheck() - def kl_loss(z_p, logs_q, m_p, logs_p, z_mask): + def kl_loss(self, z_p, logs_q, m_p, logs_p, z_mask): """ z_p, logs_q: [b, h, t_t] m_p, logs_p: [b, h, t_t] diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index f05ed81e171a..0ae72ec601ba 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -98,11 +98,13 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): self.splice_length = cfg.splice_length self.sample_rate = cfg.sample_rate self.hop_size = cfg.hop_size + self.n_fft = cfg.train_ds.dataset.n_fft + self.win_length = cfg.train_ds.dataset.win_length self.net_g = SynthesizerTrn( n_vocab = cfg.symbols_embedding_dim, - spec_channels = cfg.n_mel_channels, - segment_size = cfg.segment_size, + spec_channels = cfg.train_ds.dataset.n_fft // 2 + 1, + segment_size = cfg.segment_size // 
cfg.train_ds.dataset.hop_length, inter_channels = cfg.inter_channels, hidden_channels = cfg.hidden_channels, filter_channels = cfg.filter_channels, @@ -133,7 +135,7 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): self.stft = lambda x: torch.stft( input=x, n_fft=self.n_fft, - hop_length=self.hop_len, + hop_length=self.hop_size, win_length=self.win_length, window=window_fn(self.win_length, periodic=False).to(torch.float) if window_fn else None, ) @@ -168,6 +170,7 @@ def configure_optimizers(self): def forward(self, batch, batch_idx): with torch.no_grad(): + # TODO: Fix (x, x_lengths, spec, spec_lengths, y, y_lengths) = batch # remove else @@ -189,7 +192,8 @@ def get_spec(self, audio): return spec def training_step(self, batch, batch_idx): - (x, x_lengths, y, y_lengths) = batch + # (x, x_lengths, y, y_lengths) = batch + (y, y_lengths, x, x_lengths) = batch spec = self.get_spec(y) spec_lengths = torch.ones(spec.shape[0]) * spec.shape[2] @@ -199,7 +203,7 @@ def training_step(self, batch, batch_idx): (z, z_p, m_p, logs_p, m_q, logs_q) = self.net_g(x, x_lengths, spec, spec_lengths) mel = modules.spec_to_mel_torch( spec, - self._cfg.train_ds.filter_length, + self._cfg.filter_length, self._cfg.n_mel_channels, self._cfg.sample_rate, self._cfg.mel_fmin, @@ -208,14 +212,15 @@ def training_step(self, batch, batch_idx): y_mel = modules.slice_segments(mel, ids_slice, self._cfg.segment_size // self._cfg.hop_size) y_hat_mel = modules.mel_spectrogram_torch( y_hat.squeeze(1), - self._cfg.train_ds.filter_length, + self._cfg.filter_length, self._cfg.n_mel_channels, self._cfg.sample_rate, self._cfg.hop_size, - self._cfg.preprocessing.n_window_size, + self._cfg.preprocessor.n_window_size, self._cfg.mel_fmin, self._cfg.mel_fmax ) + y = torch.unsqueeze(y, 1) y = modules.slice_segments(y, ids_slice * self._cfg.hop_size, self._cfg.segment_size) # slice y_d_hat_r, y_d_hat_g, _, _ = self.net_d(y, y_hat.detach()) loss_disc, losses_disc_r, losses_disc_g = self.disc_loss(y_d_hat_r, y_d_hat_g) @@ -266,15 +271,18 @@ def training_step(self, batch, batch_idx): def validation_step(self, batch, batch_idx): # (x, x_lengths, spec, spec_lengths, y, y_lengths) = batch - (x, x_lengths, y, y_lengths) = batch + (y, y_lengths, x, x_lengths) = batch - - y_hat, attn, mask, *_ = self.net_g.infer(y, y_lengths, max_len=1000) + y_hat, attn, mask, *_ = self.net_g.infer(x, x_lengths, max_len=1000) + y_hat = y_hat.squeeze() y_hat_lengths = mask.sum([1, 2]).long() * self._cfg.train_ds.dataset.hop_length + # spec = self.get_spec(y) + # spec_lengths = torch.ones(spec.shape[0]) * spec.shape[2] + # Note to modify the functions / use the ones in NeMo, we need the lengths - mel, mel_lengths = self.audio_to_melspec_precessor(x, x_lengths) - y_hat_mel, y_hat_mel_lengths = self.audio_to_melspec_precessor(y, y_lengths) + mel, mel_lengths = self.audio_to_melspec_precessor(y, y_lengths) + y_hat_mel, y_hat_mel_lengths = self.audio_to_melspec_precessor(y_hat, y_hat_lengths) # plot audio once per epoch if batch_idx == 0 and self.logger is not None and self.logger.experiment is not None: diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index 106ecdbf828d..a99241147c9d 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -609,7 +609,7 @@ def __init__(self, self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 
1).to(x.dtype) + x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype).to(device='cuda') x = self.pre(x) * x_mask x = self.enc(x, x_mask, g=g) stats = self.proj(x) * x_mask @@ -912,6 +912,8 @@ def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): # Mel_processing # ################## +mel_basis = {} +hann_window = {} def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): global mel_basis @@ -1013,6 +1015,7 @@ def rand_slice_segments(x, x_lengths=None, segment_size=4): if x_lengths is None: x_lengths = t ids_str_max = x_lengths - segment_size + 1 + ids_str_max = ids_str_max.to(device=x.device) ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) ret = slice_segments(x, ids_str, segment_size) return ret, ids_str From 080fabc5732eb460b280fc45c9324443a6270ec1 Mon Sep 17 00:00:00 2001 From: jasonjjl1999 Date: Tue, 23 Nov 2021 22:24:10 -0500 Subject: [PATCH 029/244] Fix loss --- nemo/collections/tts/losses/vits_losses.py | 12 ++++++------ nemo/collections/tts/models/vits.py | 7 +++---- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/nemo/collections/tts/losses/vits_losses.py b/nemo/collections/tts/losses/vits_losses.py index e2c197350732..0fefe839013f 100644 --- a/nemo/collections/tts/losses/vits_losses.py +++ b/nemo/collections/tts/losses/vits_losses.py @@ -95,11 +95,11 @@ class KlLoss(Loss): @property def input_types(self): return { - "z_p": [NeuralType(('B', 'T'), VoidType())], - "logs_q": [NeuralType(('B', 'T'), VoidType())], - "m_p": [NeuralType(('B', 'T'), VoidType())], - "logs_p": [NeuralType(('B', 'T'), VoidType())], - "z_mask": [NeuralType(('B', 'T'), VoidType())], + "z_p": [NeuralType(('B', 'D', 'T'), VoidType())], + "logs_q": [NeuralType(('B', 'D', 'T'), VoidType())], + "m_p": [NeuralType(('B', 'D', 'T'), VoidType())], + "logs_p": [NeuralType(('B', 'D', 'T'), VoidType())], + "z_mask": [NeuralType(('B', 'D', 'T'), VoidType())], } @property @@ -109,7 +109,7 @@ def output_types(self): } @typecheck() - def kl_loss(self, z_p, logs_q, m_p, logs_p, z_mask): + def forward(self, z_p, logs_q, m_p, logs_p, z_mask): """ z_p, logs_q: [b, h, t_t] m_p, logs_p: [b, h, t_t] diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 0ae72ec601ba..aedaaf20b460 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -238,10 +238,9 @@ def training_step(self, batch, batch_idx): with autocast(enabled=False): loss_dur = torch.sum(l_length.float()) loss_mel = F.l1_loss(y_mel, y_hat_mel) * self._cfg.c_mel - loss_kl = self.kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * self._cfg.c_kl - - loss_fm = self.feat_matching_loss(fmap_r, fmap_g) - loss_gen, losses_gen = self.gen_loss(y_d_hat_g) + loss_kl = self.kl_loss(z_p=z_p, logs_q=logs_q, m_p=m_p, logs_p=logs_p, z_mask=z_mask) * self._cfg.c_kl + loss_fm = self.feat_matching_loss(fmap_r=fmap_r, fmag_g=fmap_g) + loss_gen, losses_gen = self.gen_loss(disc_outputs=y_d_hat_g) loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl # train generator From bf304b3691257ba9445282bb43b9536da8308415 Mon Sep 17 00:00:00 2001 From: jasonjjl1999 Date: Tue, 23 Nov 2021 22:26:33 -0500 Subject: [PATCH 030/244] Fix cuda thing --- nemo/collections/tts/modules/vits_modules.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index a99241147c9d..3f85fdac6f3c 100644 --- 
a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -609,7 +609,7 @@ def __init__(self, self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype).to(device='cuda') + x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype).to(device=x.device) x = self.pre(x) * x_mask x = self.enc(x, x_mask, g=g) stats = self.proj(x) * x_mask From 3f0e3b514e4597fbcd203176912f8a636f700305 Mon Sep 17 00:00:00 2001 From: jasonjjl1999 Date: Wed, 24 Nov 2021 02:55:05 -0500 Subject: [PATCH 031/244] Fix monotonic align import --- .../tts/modules/monotonic_align/__init__.py | 2 +- setup.py | 13 +++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/nemo/collections/tts/modules/monotonic_align/__init__.py b/nemo/collections/tts/modules/monotonic_align/__init__.py index 3d7009c40fea..9293c5af5d4a 100644 --- a/nemo/collections/tts/modules/monotonic_align/__init__.py +++ b/nemo/collections/tts/modules/monotonic_align/__init__.py @@ -1,6 +1,6 @@ import numpy as np import torch -from .monotonic_align.core import maximum_path_c +from .core import maximum_path_c def maximum_path(neg_cent, mask): diff --git a/setup.py b/setup.py index 9c65529e9f36..7472fe864a43 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,10 @@ from itertools import chain import importlib.util +from distutils.core import setup +from Cython.Build import cythonize +import numpy + import setuptools @@ -105,6 +109,15 @@ def req_file(filename, folder="requirements"): tests_requirements = extras_require["test"] +############################################################################### +# Monotonic Align # +# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # + +setup( + name = 'monotonic_align', + ext_modules = cythonize("nemo/collections/tts/modules/monotonic_align/core.pyx"), + include_dirs=[numpy.get_include()] +) ############################################################################### # Code style checkers # From 66e3e646038302c4520bad0059ce856e3d8bd088 Mon Sep 17 00:00:00 2001 From: jasonjjl1999 Date: Wed, 24 Nov 2021 16:34:30 -0500 Subject: [PATCH 032/244] Fix typos in vits.py --- nemo/collections/tts/models/vits.py | 23 +---------------------- 1 file changed, 1 insertion(+), 22 deletions(-) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index aedaaf20b460..f373bde576b1 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -18,7 +18,6 @@ from nemo.collections.tts.torch.data import TTSDataset from nemo.core.classes.common import PretrainedModelInfo from nemo.utils import logging -<<<<<<< HEAD from nemo.collections.tts.models.base import TextToWaveform from nemo.collections.tts.losses.vits_losses import DiscriminatorLoss, FeatureLoss, GeneratorLoss, KlLoss import nemo.collections.tts.modules.vits_modules as modules @@ -239,7 +238,7 @@ def training_step(self, batch, batch_idx): loss_dur = torch.sum(l_length.float()) loss_mel = F.l1_loss(y_mel, y_hat_mel) * self._cfg.c_mel loss_kl = self.kl_loss(z_p=z_p, logs_q=logs_q, m_p=m_p, logs_p=logs_p, z_mask=z_mask) * self._cfg.c_kl - loss_fm = self.feat_matching_loss(fmap_r=fmap_r, fmag_g=fmap_g) + loss_fm = self.feat_matching_loss(fmap_r=fmap_r, fmap_g=fmap_g) loss_gen, losses_gen = self.gen_loss(disc_outputs=y_d_hat_g) loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl @@ 
-298,7 +297,6 @@ def validation_step(self, batch, batch_idx): self.global_step, sample_rate=self.sample_rate, ) -<<<<<<< HEAD self.logger.experiment.add_image( "val_mel_target", @@ -314,25 +312,6 @@ def validation_step(self, batch, batch_idx): dataformats="HWC", ) - -======= - - self.logger.experiment.add_image( - "val_mel_target", - plot_spectrogram_to_numpy(mel[0, :, : mel_lengths[0]].cpu().numpy()), - self.global_step, - dataformats="HWC", - ) - - self.logger.experiment.add_image( - "val_mel_predicted", - plot_spectrogram_to_numpy(y_hat_mel[0, :, : y_hat_mel_lengths[0]].cpu().numpy()), - self.global_step, - dataformats="HWC", - ) - - ->>>>>>> e8f520f47... Modified validation step def validation_step_alt(self, batch, batch_idx): (x, x_lengths, spec, spec_lengths, y, y_lengths) = batch From 0a7a3cbf8d52a7b331a5a8be527ca3393bd91289 Mon Sep 17 00:00:00 2001 From: jasonjjl1999 Date: Wed, 24 Nov 2021 17:02:53 -0500 Subject: [PATCH 033/244] Disable loss typecheck --- nemo/collections/tts/losses/vits_losses.py | 2 +- nemo/collections/tts/models/vits.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/nemo/collections/tts/losses/vits_losses.py b/nemo/collections/tts/losses/vits_losses.py index 0fefe839013f..bee515d3730d 100644 --- a/nemo/collections/tts/losses/vits_losses.py +++ b/nemo/collections/tts/losses/vits_losses.py @@ -19,7 +19,7 @@ def output_types(self): "loss": NeuralType(elements_type=LossType()), } - @typecheck() + # @typecheck() def forward(self, fmap_r, fmap_g): loss = 0 for dr, dg in zip(fmap_r, fmap_g): diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index f373bde576b1..503404adcc23 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -265,7 +265,9 @@ def training_step(self, batch, batch_idx): "losses_disc_g": losses_disc_g, "loss_disc_all": loss_disc_all, } - self.log_dict(metrics, on_step=True, sync_dist=True) + + # TODO: Fix logging + # self.log_dict(metrics, on_step=True, sync_dist=True) def validation_step(self, batch, batch_idx): # (x, x_lengths, spec, spec_lengths, y, y_lengths) = batch From e56a53971842efbe415883e696fb5513f3778ccd Mon Sep 17 00:00:00 2001 From: jasonjjl1999 Date: Wed, 22 Dec 2021 20:38:03 -0500 Subject: [PATCH 034/244] Fix spectrogram lengths --- nemo/collections/tts/models/vits.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 503404adcc23..e464e985d5fd 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -195,7 +195,7 @@ def training_step(self, batch, batch_idx): (y, y_lengths, x, x_lengths) = batch spec = self.get_spec(y) - spec_lengths = torch.ones(spec.shape[0]) * spec.shape[2] + spec_lengths = self.audio_to_melspec_precessor.get_seq_len(y_lengths) with autocast(enabled=False): y_hat, l_length, attn, ids_slice, x_mask, z_mask, \ From df7e99660e5c7f957fdbec153dbeb1b239579ce6 Mon Sep 17 00:00:00 2001 From: jasonjjl1999 Date: Wed, 22 Dec 2021 21:09:27 -0500 Subject: [PATCH 035/244] Remove Precision 16 requirement --- examples/tts/conf/vits.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index 348db1aab6d1..25b0b3e806de 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -247,7 +247,6 @@ trainer: flush_logs_every_n_steps: 1000 log_every_n_steps: 100 check_val_every_n_epoch: 5 - precision: 16 exp_manager: exp_dir: null From 
56ae3175fb99151a06bcaf8c59ca323c929a740e Mon Sep 17 00:00:00 2001 From: "richa.ren@mail.utoronto.ca" Date: Wed, 19 Jan 2022 21:40:27 -0500 Subject: [PATCH 036/244] Address lgtm alerts --- examples/tts/vits.py | 1 - nemo/collections/tts/losses/vits_losses.py | 1 - nemo/collections/tts/models/vits.py | 86 +------------------ .../tts/modules/vits_mel_processing.py | 11 --- 4 files changed, 2 insertions(+), 97 deletions(-) diff --git a/examples/tts/vits.py b/examples/tts/vits.py index 56c48c331840..87e76699b7fa 100644 --- a/examples/tts/vits.py +++ b/examples/tts/vits.py @@ -13,7 +13,6 @@ # limitations under the License. import pytorch_lightning as pl -from pytorch_lightning.plugins import DDPPlugin from nemo.collections.common.callbacks import LogEpochTimeCallback from nemo.collections.tts.models.vits import VitsModel diff --git a/nemo/collections/tts/losses/vits_losses.py b/nemo/collections/tts/losses/vits_losses.py index bee515d3730d..10e1f7836aec 100644 --- a/nemo/collections/tts/losses/vits_losses.py +++ b/nemo/collections/tts/losses/vits_losses.py @@ -1,5 +1,4 @@ import torch -from torch.nn import functional as F from nemo.core.classes import Loss, typecheck from nemo.core.neural_types.elements import LossType, VoidType diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index e464e985d5fd..b83e65f16cc5 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -15,13 +15,11 @@ from nemo.collections.tts.losses.vits_losses import DiscriminatorLoss, FeatureLoss, GeneratorLoss, KlLoss from nemo.collections.tts.models.base import TextToWaveform from nemo.collections.tts.modules.vits_modules import SynthesizerTrn, MultiPeriodDiscriminator -from nemo.collections.tts.torch.data import TTSDataset from nemo.core.classes.common import PretrainedModelInfo from nemo.utils import logging from nemo.collections.tts.models.base import TextToWaveform from nemo.collections.tts.losses.vits_losses import DiscriminatorLoss, FeatureLoss, GeneratorLoss, KlLoss import nemo.collections.tts.modules.vits_modules as modules -from nemo.collections.tts.modules.vits_modules import init_weights, get_padding, SynthesizerTrn, MultiPeriodDiscriminator @dataclass @@ -169,7 +167,6 @@ def configure_optimizers(self): def forward(self, batch, batch_idx): with torch.no_grad(): - # TODO: Fix (x, x_lengths, spec, spec_lengths, y, y_lengths) = batch # remove else @@ -179,7 +176,7 @@ def forward(self, batch, batch_idx): y_hat, attn, mask, *_ = self.net_g.module.infer(x, x_lengths, max_len=1000) y_hat_lengths = mask.sum([1, 2]).long() * self._cfg.hop_size - return y_hat, y_lengths + return y_hat, y_hat_lengths def get_spec(self, audio): @@ -266,8 +263,7 @@ def training_step(self, batch, batch_idx): "loss_disc_all": loss_disc_all, } - # TODO: Fix logging - # self.log_dict(metrics, on_step=True, sync_dist=True) + self.log_dict(metrics, on_step=True, sync_dist=True) def validation_step(self, batch, batch_idx): # (x, x_lengths, spec, spec_lengths, y, y_lengths) = batch @@ -277,9 +273,6 @@ def validation_step(self, batch, batch_idx): y_hat = y_hat.squeeze() y_hat_lengths = mask.sum([1, 2]).long() * self._cfg.train_ds.dataset.hop_length - # spec = self.get_spec(y) - # spec_lengths = torch.ones(spec.shape[0]) * spec.shape[2] - # Note to modify the functions / use the ones in NeMo, we need the lengths mel, mel_lengths = self.audio_to_melspec_precessor(y, y_lengths) y_hat_mel, y_hat_mel_lengths = self.audio_to_melspec_precessor(y_hat, y_hat_lengths) @@ -314,81 +307,6 @@ 
def validation_step(self, batch, batch_idx): dataformats="HWC", ) - def validation_step_alt(self, batch, batch_idx): - - (x, x_lengths, spec, spec_lengths, y, y_lengths) = batch - - # plot audio once per epoch - if batch_idx == 0 and self.logger is not None and self.logger.experiment is not None: - - y_hat, attn, mask, *_ = self.net_g.module.infer(x, x_lengths, max_len=1000) - y_hat_lengths = mask.sum([1, 2]).long() * self.hps.data.hop_length - - # Note to modify the functions / use the ones in NeMo, we need the lengths - mel, mel_lengths = self.audio_to_melspec_precessor(x, x_lengths) - y_hat_mel, y_hat_mel_lengths = self.audio_to_melspec_precessor(y, y_lengths) - - self.logger.experiment.add_audio( - "val_wav_target", - y[0, : y_lengths[0]].data.cpu().numpy(), - self.global_step, - sample_rate=self.sample_rate, - ) - - self.logger.experiment.add_audio( - "val_wav_predicted", - y_hat[0, : y_hat_lengths[0]].data.cpu().numpy(), - self.global_step, - sample_rate=self.sample_rate, - ) - - self.logger.experiment.add_image( - "val_mel_target", - plot_spectrogram_to_numpy(mel[0, :, : mel_lengths[0]].cpu().numpy()), - self.global_step, - dataformats="HWC", - ) - - self.logger.experiment.add_image( - "val_mel_predicted", - plot_spectrogram_to_numpy(y_hat_mel[0, :, : y_hat_mel_lengths[0]].cpu().numpy()), - self.global_step, - dataformats="HWC", - ) - - y_hat, l_length, attn, ids_slice, x_mask, z_mask, \ - (z, z_p, m_p, logs_p, m_q, logs_q) = self.net_g(x, x_lengths, spec, spec_lengths) - mel = modules.spec_to_mel_torch( - spec, - self._cfg.train_ds.filter_length, - self._cfg.n_mel_channels, - self._cfg.sample_rate, - self._cfg.mel_fmin, - self._cfg.mel_fmax - ) - mel = modules.spec_to_mel_torch( - spec, - self._cfg.train_ds.filter_length, - self._cfg.n_mel_channels, - self._cfg.sample_rate, - self._cfg.mel_fmin, - self._cfg.mel_fmax - ) - y_mel = modules.slice_segments(mel, ids_slice, self._cfg.segment_size // self._cfg.hop_size) - y_hat_mel = modules.mel_spectrogram_torch( - y_hat.squeeze(1), - self._cfg.train_ds.filter_length, - self._cfg.n_mel_channels, - self._cfg.sample_rate, - self._cfg.hop_size, - self._cfg.preprocessing.n_window_size, - self._cfg.mel_fmin, - self._cfg.mel_fmax - ) - - loss_mel = F.l1_loss(y_mel, y_hat_mel) * self._cfg.c_mel - self.log_dict({"val_loss * c_mel": loss_mel}, on_epoch=True, sync_dist=True) - @staticmethod def _loader(cfg): try: diff --git a/nemo/collections/tts/modules/vits_mel_processing.py b/nemo/collections/tts/modules/vits_mel_processing.py index de7b5f1c8ce6..606f76ff45d9 100644 --- a/nemo/collections/tts/modules/vits_mel_processing.py +++ b/nemo/collections/tts/modules/vits_mel_processing.py @@ -1,16 +1,5 @@ -import math -import os -import random import torch -from torch import nn -import torch.nn.functional as F import torch.utils.data -import numpy as np -import librosa -import librosa.util as librosa_util -from librosa.util import normalize, pad_center, tiny -from scipy.signal import get_window -from scipy.io.wavfile import read from librosa.filters import mel as librosa_mel_fn MAX_WAV_VALUE = 32768.0 From 340d9f09d9d711a5342acb55714eb8638910cfd1 Mon Sep 17 00:00:00 2001 From: jasonjjl1999 Date: Wed, 19 Jan 2022 21:51:53 -0500 Subject: [PATCH 037/244] clean up unused code --- nemo/collections/tts/models/vits.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index e464e985d5fd..da6820d95fd3 100644 --- a/nemo/collections/tts/models/vits.py +++ 
b/nemo/collections/tts/models/vits.py @@ -39,16 +39,6 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): if isinstance(cfg, dict): cfg = OmegaConf.create(cfg) - - self._parser = parsers.make_parser( - labels=cfg.labels, - name='en', - unk_id=-1, - blank_id=-1, - do_normalize=True, - abbreviation_version="fastpitch", - make_table=False, - ) super().__init__(cfg=cfg, trainer=trainer) From 73dbb98015785445ac27c39f629fc778d7a10053 Mon Sep 17 00:00:00 2001 From: "richa.ren@mail.utoronto.ca" Date: Wed, 19 Jan 2022 21:40:27 -0500 Subject: [PATCH 038/244] Address lgtm alerts --- examples/tts/vits.py | 1 - nemo/collections/tts/losses/vits_losses.py | 1 - nemo/collections/tts/models/vits.py | 102 +++--------------- .../tts/modules/vits_mel_processing.py | 11 -- 4 files changed, 12 insertions(+), 103 deletions(-) diff --git a/examples/tts/vits.py b/examples/tts/vits.py index 56c48c331840..87e76699b7fa 100644 --- a/examples/tts/vits.py +++ b/examples/tts/vits.py @@ -13,7 +13,6 @@ # limitations under the License. import pytorch_lightning as pl -from pytorch_lightning.plugins import DDPPlugin from nemo.collections.common.callbacks import LogEpochTimeCallback from nemo.collections.tts.models.vits import VitsModel diff --git a/nemo/collections/tts/losses/vits_losses.py b/nemo/collections/tts/losses/vits_losses.py index bee515d3730d..10e1f7836aec 100644 --- a/nemo/collections/tts/losses/vits_losses.py +++ b/nemo/collections/tts/losses/vits_losses.py @@ -1,5 +1,4 @@ import torch -from torch.nn import functional as F from nemo.core.classes import Loss, typecheck from nemo.core.neural_types.elements import LossType, VoidType diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index e464e985d5fd..a751c07a98f0 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -15,13 +15,11 @@ from nemo.collections.tts.losses.vits_losses import DiscriminatorLoss, FeatureLoss, GeneratorLoss, KlLoss from nemo.collections.tts.models.base import TextToWaveform from nemo.collections.tts.modules.vits_modules import SynthesizerTrn, MultiPeriodDiscriminator -from nemo.collections.tts.torch.data import TTSDataset from nemo.core.classes.common import PretrainedModelInfo from nemo.utils import logging from nemo.collections.tts.models.base import TextToWaveform from nemo.collections.tts.losses.vits_losses import DiscriminatorLoss, FeatureLoss, GeneratorLoss, KlLoss import nemo.collections.tts.modules.vits_modules as modules -from nemo.collections.tts.modules.vits_modules import init_weights, get_padding, SynthesizerTrn, MultiPeriodDiscriminator @dataclass @@ -117,8 +115,6 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): upsample_rates = cfg.generator.upsample_rates, upsample_initial_channel = cfg.generator.upsample_initial_channel, upsample_kernel_sizes = cfg.generator.upsample_kernel_sizes, - # cfg.filter_channels // 2 + 1, - # cfg.segment_size // cfg.train_ds.dataset.hop_length, ) self.net_d = MultiPeriodDiscriminator(cfg.use_spectral_norm) self.automatic_optimization = False @@ -169,7 +165,6 @@ def configure_optimizers(self): def forward(self, batch, batch_idx): with torch.no_grad(): - # TODO: Fix (x, x_lengths, spec, spec_lengths, y, y_lengths) = batch # remove else @@ -179,7 +174,7 @@ def forward(self, batch, batch_idx): y_hat, attn, mask, *_ = self.net_g.module.infer(x, x_lengths, max_len=1000) y_hat_lengths = mask.sum([1, 2]).long() * self._cfg.hop_size - return y_hat, y_lengths + return y_hat, y_hat_lengths def 
get_spec(self, audio): @@ -191,7 +186,6 @@ def get_spec(self, audio): return spec def training_step(self, batch, batch_idx): - # (x, x_lengths, y, y_lengths) = batch (y, y_lengths, x, x_lengths) = batch spec = self.get_spec(y) @@ -265,22 +259,25 @@ def training_step(self, batch, batch_idx): "losses_disc_g": losses_disc_g, "loss_disc_all": loss_disc_all, } - - # TODO: Fix logging - # self.log_dict(metrics, on_step=True, sync_dist=True) + + for i, v in enumerate(losses_gen): + metrics["loss_gen_i_{}".format(i)] = v + + for i, v in enumerate(losses_disc_r): + metrics["loss_disc_r_{}".format(i)] = v + + for i, v in enumerate(losses_disc_g): + metrics["loss_disc_g_{}".format(i)] = v + + self.log_dict(metrics, on_step=True, sync_dist=True) def validation_step(self, batch, batch_idx): - # (x, x_lengths, spec, spec_lengths, y, y_lengths) = batch (y, y_lengths, x, x_lengths) = batch y_hat, attn, mask, *_ = self.net_g.infer(x, x_lengths, max_len=1000) y_hat = y_hat.squeeze() y_hat_lengths = mask.sum([1, 2]).long() * self._cfg.train_ds.dataset.hop_length - # spec = self.get_spec(y) - # spec_lengths = torch.ones(spec.shape[0]) * spec.shape[2] - - # Note to modify the functions / use the ones in NeMo, we need the lengths mel, mel_lengths = self.audio_to_melspec_precessor(y, y_lengths) y_hat_mel, y_hat_mel_lengths = self.audio_to_melspec_precessor(y_hat, y_hat_lengths) @@ -314,81 +311,6 @@ def validation_step(self, batch, batch_idx): dataformats="HWC", ) - def validation_step_alt(self, batch, batch_idx): - - (x, x_lengths, spec, spec_lengths, y, y_lengths) = batch - - # plot audio once per epoch - if batch_idx == 0 and self.logger is not None and self.logger.experiment is not None: - - y_hat, attn, mask, *_ = self.net_g.module.infer(x, x_lengths, max_len=1000) - y_hat_lengths = mask.sum([1, 2]).long() * self.hps.data.hop_length - - # Note to modify the functions / use the ones in NeMo, we need the lengths - mel, mel_lengths = self.audio_to_melspec_precessor(x, x_lengths) - y_hat_mel, y_hat_mel_lengths = self.audio_to_melspec_precessor(y, y_lengths) - - self.logger.experiment.add_audio( - "val_wav_target", - y[0, : y_lengths[0]].data.cpu().numpy(), - self.global_step, - sample_rate=self.sample_rate, - ) - - self.logger.experiment.add_audio( - "val_wav_predicted", - y_hat[0, : y_hat_lengths[0]].data.cpu().numpy(), - self.global_step, - sample_rate=self.sample_rate, - ) - - self.logger.experiment.add_image( - "val_mel_target", - plot_spectrogram_to_numpy(mel[0, :, : mel_lengths[0]].cpu().numpy()), - self.global_step, - dataformats="HWC", - ) - - self.logger.experiment.add_image( - "val_mel_predicted", - plot_spectrogram_to_numpy(y_hat_mel[0, :, : y_hat_mel_lengths[0]].cpu().numpy()), - self.global_step, - dataformats="HWC", - ) - - y_hat, l_length, attn, ids_slice, x_mask, z_mask, \ - (z, z_p, m_p, logs_p, m_q, logs_q) = self.net_g(x, x_lengths, spec, spec_lengths) - mel = modules.spec_to_mel_torch( - spec, - self._cfg.train_ds.filter_length, - self._cfg.n_mel_channels, - self._cfg.sample_rate, - self._cfg.mel_fmin, - self._cfg.mel_fmax - ) - mel = modules.spec_to_mel_torch( - spec, - self._cfg.train_ds.filter_length, - self._cfg.n_mel_channels, - self._cfg.sample_rate, - self._cfg.mel_fmin, - self._cfg.mel_fmax - ) - y_mel = modules.slice_segments(mel, ids_slice, self._cfg.segment_size // self._cfg.hop_size) - y_hat_mel = modules.mel_spectrogram_torch( - y_hat.squeeze(1), - self._cfg.train_ds.filter_length, - self._cfg.n_mel_channels, - self._cfg.sample_rate, - self._cfg.hop_size, - 
self._cfg.preprocessing.n_window_size, - self._cfg.mel_fmin, - self._cfg.mel_fmax - ) - - loss_mel = F.l1_loss(y_mel, y_hat_mel) * self._cfg.c_mel - self.log_dict({"val_loss * c_mel": loss_mel}, on_epoch=True, sync_dist=True) - @staticmethod def _loader(cfg): try: diff --git a/nemo/collections/tts/modules/vits_mel_processing.py b/nemo/collections/tts/modules/vits_mel_processing.py index de7b5f1c8ce6..606f76ff45d9 100644 --- a/nemo/collections/tts/modules/vits_mel_processing.py +++ b/nemo/collections/tts/modules/vits_mel_processing.py @@ -1,16 +1,5 @@ -import math -import os -import random import torch -from torch import nn -import torch.nn.functional as F import torch.utils.data -import numpy as np -import librosa -import librosa.util as librosa_util -from librosa.util import normalize, pad_center, tiny -from scipy.signal import get_window -from scipy.io.wavfile import read from librosa.filters import mel as librosa_mel_fn MAX_WAV_VALUE = 32768.0 From 6aa5fc90e4e19a15fc021d6d9b5967dc61a26c2c Mon Sep 17 00:00:00 2001 From: "richa.ren@mail.utoronto.ca" Date: Wed, 19 Jan 2022 22:56:49 -0500 Subject: [PATCH 039/244] Refactor audio_to_mel_torch method --- nemo/collections/tts/models/vits.py | 3 +- .../tts/modules/vits_mel_processing.py | 32 +++---------------- nemo/collections/tts/modules/vits_modules.py | 2 +- 3 files changed, 7 insertions(+), 30 deletions(-) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 81e375734a1b..e871a1f3c4f8 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -188,7 +188,8 @@ def training_step(self, batch, batch_idx): self._cfg.mel_fmax ) y_mel = modules.slice_segments(mel, ids_slice, self._cfg.segment_size // self._cfg.hop_size) - y_hat_mel = modules.mel_spectrogram_torch( + + y_hat_mel = modules.audio_to_mel_torch( y_hat.squeeze(1), self._cfg.filter_length, self._cfg.n_mel_channels, diff --git a/nemo/collections/tts/modules/vits_mel_processing.py b/nemo/collections/tts/modules/vits_mel_processing.py index 606f76ff45d9..0d8bc0c15522 100644 --- a/nemo/collections/tts/modules/vits_mel_processing.py +++ b/nemo/collections/tts/modules/vits_mel_processing.py @@ -71,31 +71,7 @@ def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): return spec -def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - - 
return spec \ No newline at end of file +def audio_to_mel_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): + spec = spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center) + melspec = spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax) + return melspec \ No newline at end of file diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index 3f85fdac6f3c..fba250c1d795 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -927,7 +927,7 @@ def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): return spec -def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): +def audio_to_mel_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): if torch.min(y) < -1.: print('min value is ', torch.min(y)) if torch.max(y) > 1.: From 817db7003acdcb1e6160742c3a29f8e0acb08a96 Mon Sep 17 00:00:00 2001 From: "richa.ren@mail.utoronto.ca" Date: Wed, 19 Jan 2022 23:45:10 -0500 Subject: [PATCH 040/244] Use NeMo FilterBank to get melspec Todo: set self.fb --- examples/tts/conf/vits.yaml | 3 +++ .../asr/parts/preprocessing/features.py | 2 +- nemo/collections/tts/models/vits.py | 14 ++++---------- 3 files changed, 8 insertions(+), 11 deletions(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index 25b0b3e806de..d425d8f764a1 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -163,6 +163,7 @@ model: _target_: nemo.collections.asr.parts.preprocessing.features.FilterbankFeatures dither: 0.0 nfilt: ${model.n_mel_channels} + stft_pad_amount: 384 frame_splicing: 1 highfreq: 8000 log: true @@ -177,6 +178,8 @@ model: pad_to: 1 pad_value: 0 preemph: null + stft_conv: false + nb_augmentation_prob : 0 sample_rate: ${model.sample_rate} window: hann exact_pad: true diff --git a/nemo/collections/asr/parts/preprocessing/features.py b/nemo/collections/asr/parts/preprocessing/features.py index 9ff02a3a0fd1..118ed1c279cd 100644 --- a/nemo/collections/asr/parts/preprocessing/features.py +++ b/nemo/collections/asr/parts/preprocessing/features.py @@ -422,7 +422,7 @@ def forward(self, x, seq_len): # dot with filterbank energies x = torch.matmul(self.fb.to(x.dtype), x) - + print(self.fb) # log features if required if self.log: if self.log_zero_guard_type == "add": diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index e871a1f3c4f8..1525fd681832 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -45,6 +45,7 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): OmegaConf.merge(cfg, schema) self.audio_to_melspec_precessor = instantiate(cfg.preprocessor) + # self.audio_to_melspec_precessor.fb = torch.tensor self.melspec_fn = instantiate(cfg.preprocessor, highfreq=None, use_grads=True) self.encoder = instantiate(cfg.input_fft) @@ -189,16 +190,9 @@ def training_step(self, batch, batch_idx): ) y_mel = modules.slice_segments(mel, ids_slice, self._cfg.segment_size // self._cfg.hop_size) - y_hat_mel = modules.audio_to_mel_torch( - y_hat.squeeze(1), - self._cfg.filter_length, - self._cfg.n_mel_channels, - self._cfg.sample_rate, - self._cfg.hop_size, - self._cfg.preprocessor.n_window_size, - self._cfg.mel_fmin, - self._cfg.mel_fmax - ) + y_hat_lengths = self.audio_to_melspec_precessor.get_seq_len(y_hat) + y_hat_mel = 
self.audio_to_melspec_precessor(y_hat, y_hat_lengths) + y = torch.unsqueeze(y, 1) y = modules.slice_segments(y, ids_slice * self._cfg.hop_size, self._cfg.segment_size) # slice y_d_hat_r, y_d_hat_g, _, _ = self.net_d(y, y_hat.detach()) From 37cb9e5bfd025c166c37d3253ba4b7313ddc33bd Mon Sep 17 00:00:00 2001 From: jasonjjl1999 Date: Thu, 20 Jan 2022 11:28:41 -0500 Subject: [PATCH 041/244] Fix filterbank max frequency to match with original VITS --- examples/tts/conf/vits.yaml | 2 +- nemo/collections/tts/models/vits.py | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index 25b0b3e806de..7a58c90e4c34 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -23,7 +23,7 @@ n_window_size: 1024 n_window_stride: 256 n_fft: 1024 lowfreq: 0 -highfreq: 8000 +highfreq: null window: "hann" pitch_loss_scale: 0.1 diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index e871a1f3c4f8..35970117cc2e 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -44,8 +44,7 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): # Ensure passed cfg is compliant with schema OmegaConf.merge(cfg, schema) - self.audio_to_melspec_precessor = instantiate(cfg.preprocessor) - self.melspec_fn = instantiate(cfg.preprocessor, highfreq=None, use_grads=True) + self.audio_to_melspec_precessor = instantiate(cfg.preprocessor, highfreq=cfg.train_ds.dataset.highfreq) self.encoder = instantiate(cfg.input_fft) self.duration_predictor = instantiate(cfg.duration_predictor) From b6e24aead543074a09c96db88bd92223fe77ff88 Mon Sep 17 00:00:00 2001 From: jasonjjl1999 Date: Thu, 20 Jan 2022 11:31:29 -0500 Subject: [PATCH 042/244] Fix filterbank features correct length --- examples/tts/conf/vits.yaml | 3 +++ nemo/collections/asr/parts/preprocessing/features.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index 7a58c90e4c34..16644a44c780 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -163,6 +163,7 @@ model: _target_: nemo.collections.asr.parts.preprocessing.features.FilterbankFeatures dither: 0.0 nfilt: ${model.n_mel_channels} + stft_pad_amount: 384 frame_splicing: 1 highfreq: 8000 log: true @@ -177,6 +178,8 @@ model: pad_to: 1 pad_value: 0 preemph: null + stft_conv: false + nb_augmentation_prob : 0 sample_rate: ${model.sample_rate} window: hann exact_pad: true diff --git a/nemo/collections/asr/parts/preprocessing/features.py b/nemo/collections/asr/parts/preprocessing/features.py index 9ff02a3a0fd1..118ed1c279cd 100644 --- a/nemo/collections/asr/parts/preprocessing/features.py +++ b/nemo/collections/asr/parts/preprocessing/features.py @@ -422,7 +422,7 @@ def forward(self, x, seq_len): # dot with filterbank energies x = torch.matmul(self.fb.to(x.dtype), x) - + print(self.fb) # log features if required if self.log: if self.log_zero_guard_type == "add": From 05627e4f1d3a981d1be2015ae3416f9eeb0add54 Mon Sep 17 00:00:00 2001 From: "richa.ren@mail.utoronto.ca" Date: Thu, 20 Jan 2022 11:34:59 -0500 Subject: [PATCH 043/244] Address lgtm issues --- nemo/collections/tts/models/vits.py | 16 ++++++++-------- nemo/collections/tts/modules/vits_modules.py | 12 ++---------- 2 files changed, 10 insertions(+), 18 deletions(-) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 35970117cc2e..684d41d53a37 100644 --- 
a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -8,11 +8,10 @@ from torch.nn import functional as F from typing import Any, Dict -import nemo.collections.tts.modules.vits_modules as modules from nemo.collections.tts.helpers.helpers import plot_spectrogram_to_numpy from nemo.collections.tts.losses.vits_losses import DiscriminatorLoss, FeatureLoss, GeneratorLoss, KlLoss from nemo.collections.tts.models.base import TextToWaveform -from nemo.collections.tts.modules.vits_modules import SynthesizerTrn, MultiPeriodDiscriminator +from nemo.collections.tts.modules.vits_modules import SynthesizerTrn, MultiPeriodDiscriminator, spec_to_mel_torch, slice_segments, clip_grad_value_ from nemo.core.classes.common import PretrainedModelInfo from nemo.utils import logging @@ -51,7 +50,7 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): self.pitch_predictor = instantiate(cfg.pitch_predictor) self.generator = instantiate(cfg.generator) - self.multiperioddisc = modules.MultiPeriodDiscriminator() + self.multiperioddisc = MultiPeriodDiscriminator() self.feat_matching_loss = FeatureLoss() self.disc_loss = DiscriminatorLoss() self.gen_loss = GeneratorLoss() @@ -178,7 +177,7 @@ def training_step(self, batch, batch_idx): with autocast(enabled=False): y_hat, l_length, attn, ids_slice, x_mask, z_mask, \ (z, z_p, m_p, logs_p, m_q, logs_q) = self.net_g(x, x_lengths, spec, spec_lengths) - mel = modules.spec_to_mel_torch( + mel = spec_to_mel_torch( spec, self._cfg.filter_length, self._cfg.n_mel_channels, @@ -186,7 +185,7 @@ def training_step(self, batch, batch_idx): self._cfg.mel_fmin, self._cfg.mel_fmax ) - y_mel = modules.slice_segments(mel, ids_slice, self._cfg.segment_size // self._cfg.hop_size) + y_mel = slice_segments(mel, ids_slice, self._cfg.segment_size // self._cfg.hop_size) y_hat_mel = modules.audio_to_mel_torch( y_hat.squeeze(1), @@ -199,7 +198,7 @@ def training_step(self, batch, batch_idx): self._cfg.mel_fmax ) y = torch.unsqueeze(y, 1) - y = modules.slice_segments(y, ids_slice * self._cfg.hop_size, self._cfg.segment_size) # slice + y = slice_segments(y, ids_slice * self._cfg.hop_size, self._cfg.segment_size) # slice y_d_hat_r, y_d_hat_g, _, _ = self.net_d(y, y_hat.detach()) loss_disc, losses_disc_r, losses_disc_g = self.disc_loss(y_d_hat_r, y_d_hat_g) loss_disc_all = loss_disc @@ -207,7 +206,7 @@ def training_step(self, batch, batch_idx): # train discriminator self.optim_d.zero_grad() self.manual_backward(loss_disc_all) - modules.clip_grad_value_(self.net_d.parameters(), None) + clip_grad_value_(self.net_d.parameters(), None) self.optim_d.step() with autocast(enabled=True): @@ -224,7 +223,7 @@ def training_step(self, batch, batch_idx): # train generator self.optim_g.zero_grad() self.manual_backward(loss_gen_all) - modules.clip_grad_value_(self.net_g.parameters(), None) + clip_grad_value_(self.net_g.parameters(), None) self.optim_d.step() schedulers = self.lr_schedulers() @@ -319,6 +318,7 @@ def setup_test_data(self, cfg): """Omitted.""" pass + @classmethod def list_available_models(cls) -> 'List[PretrainedModelInfo]': list_of_models = [] # TODO: List available models?? 
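A note on the training-step slicing used in the patch above: VITS computes its GAN and mel losses on short random windows rather than full utterances. `rand_slice_segments` draws one start frame per example, and the same `ids_slice` is reused, scaled by `hop_size`, to crop the raw waveform so generated and target segments stay time-aligned. A self-contained sketch of that pattern (shapes and names here are illustrative, not the NeMo implementation):

import torch

def slice_segments(x, ids_str, segment_size):
    # x: [batch, channels, time]; ids_str: one start index per example
    out = torch.zeros_like(x[:, :, :segment_size])
    for i in range(x.size(0)):
        start = int(ids_str[i])
        out[i] = x[i, :, start : start + segment_size]
    return out

def rand_slice_segments(x, x_lengths, segment_size):
    # Valid start indices run from 0 to length - segment_size (inclusive)
    ids_str_max = (x_lengths - segment_size + 1).clamp(min=1)
    ids_str = (torch.rand(x.size(0)) * ids_str_max).long()
    return slice_segments(x, ids_str, segment_size), ids_str

# The mel slice and the audio slice share the same start ids:
hop_size, seg_frames = 256, 32
mel = torch.randn(2, 80, 120)
audio = torch.randn(2, 1, 120 * hop_size)
mel_seg, ids = rand_slice_segments(mel, torch.tensor([120, 100]), seg_frames)
audio_seg = slice_segments(audio, ids * hop_size, seg_frames * hop_size)
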
diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index fba250c1d795..9545024fc195 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -11,7 +11,6 @@ from nemo.collections.tts.modules.monotonic_align import maximum_path from nemo.collections.tts.modules.vits_mel_processing import librosa_mel_fn, spectral_normalize_torch -#from nemo.collections.tts.vits_modules import MultiPeriodDiscriminator, MultiScaleDiscriminator LRELU_SLOPE = 0.1 @@ -1087,8 +1086,6 @@ def generate_path(duration, mask): duration: [b, 1, t_x] mask: [b, 1, t_y, t_x] """ - device = duration.device - b, _, t_y, t_x = mask.shape cum_duration = torch.cumsum(duration, -1) @@ -1185,16 +1182,12 @@ def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_s self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) self.norm_layers_2.append(LayerNorm(hidden_channels)) - def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - def forward(self, x, x_mask, h, h_mask): """ x: decoder input h: encoder output """ - self_attn_mask = self.subsequent_mask(x_mask.size(2)).to(device=x.device, dtype=x.dtype) + self_attn_mask = torch.tril(torch.ones(x_mask.size(2), x_mask.size(2))).unsqueeze(0).unsqueeze(0).to(device=x.device, dtype=x.dtype) encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) x = x * x_mask for i in range(self.n_layers): @@ -1262,7 +1255,7 @@ def forward(self, x, c, attn_mask=None): def attention(self, query, key, value, mask=None): # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.size(), query.size(2)) + b, d, t_s, t_t = key.size(0), key.size(1), key.size(2), query.size(2) query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) @@ -1312,7 +1305,6 @@ def _matmul_with_relative_keys(self, x, y): return ret def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 # Pad first before slice to avoid using cond ops. 
pad_length = max(length - (self.window_size + 1), 0) slice_start_position = max((self.window_size + 1) - length, 0) From 697331b6a9a75b21dd4f0cb403f3722dffe70a3b Mon Sep 17 00:00:00 2001 From: jasonjjl1999 Date: Thu, 20 Jan 2022 11:41:54 -0500 Subject: [PATCH 044/244] Remove print statements --- nemo/collections/asr/parts/preprocessing/features.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nemo/collections/asr/parts/preprocessing/features.py b/nemo/collections/asr/parts/preprocessing/features.py index 118ed1c279cd..c9eb773fe6e3 100644 --- a/nemo/collections/asr/parts/preprocessing/features.py +++ b/nemo/collections/asr/parts/preprocessing/features.py @@ -422,7 +422,6 @@ def forward(self, x, seq_len): # dot with filterbank energies x = torch.matmul(self.fb.to(x.dtype), x) - print(self.fb) # log features if required if self.log: if self.log_zero_guard_type == "add": From 4ea74b87f804ae4623b0072deccf34dab74e416f Mon Sep 17 00:00:00 2001 From: jasonjjl1999 Date: Thu, 20 Jan 2022 11:42:31 -0500 Subject: [PATCH 045/244] Remove stft_pad_amount --- examples/tts/conf/vits.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index 16644a44c780..20146d0902ef 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -163,7 +163,6 @@ model: _target_: nemo.collections.asr.parts.preprocessing.features.FilterbankFeatures dither: 0.0 nfilt: ${model.n_mel_channels} - stft_pad_amount: 384 frame_splicing: 1 highfreq: 8000 log: true From 4cf6ffe5bdf4b2022f89adac5745d8fc98730b91 Mon Sep 17 00:00:00 2001 From: Oktai Tatanov Date: Fri, 21 Jan 2022 18:12:57 +0300 Subject: [PATCH 046/244] new structure for tts datasets in script folder Signed-off-by: Oktai Tatanov --- nemo/collections/tts/torch/readme.md | 11 -- .../cmudict/get_data.sh} | 0 .../tts/extract_sup_data.py | 51 ++++++ .../tts/hui_acg/get_data.py | 162 ++++++++++++++++++ .../ds_conf/ds_for_fastpitch_align.yaml | 51 ++++++ .../ljspeech/ds_conf/ds_for_mixer_tts.yaml | 51 ++++++ .../ljspeech/ds_conf/ds_for_mixer_tts_x.yaml | 45 +++++ .../tts/ljspeech/get_data.py | 121 +++++++++++++ 8 files changed, 481 insertions(+), 11 deletions(-) rename scripts/dataset_processing/{get_cmudict.sh => tts/cmudict/get_data.sh} (100%) create mode 100644 scripts/dataset_processing/tts/extract_sup_data.py create mode 100644 scripts/dataset_processing/tts/hui_acg/get_data.py create mode 100644 scripts/dataset_processing/tts/ljspeech/ds_conf/ds_for_fastpitch_align.yaml create mode 100644 scripts/dataset_processing/tts/ljspeech/ds_conf/ds_for_mixer_tts.yaml create mode 100644 scripts/dataset_processing/tts/ljspeech/ds_conf/ds_for_mixer_tts_x.yaml create mode 100644 scripts/dataset_processing/tts/ljspeech/get_data.py diff --git a/nemo/collections/tts/torch/readme.md b/nemo/collections/tts/torch/readme.md index 78809f7ef8ed..89ec67fcbbf4 100644 --- a/nemo/collections/tts/torch/readme.md +++ b/nemo/collections/tts/torch/readme.md @@ -49,14 +49,3 @@ for batch in tqdm(dataloader, total=len(dataloader)): pitch_tensor = torch.cat(pitch_list) print(f"PITCH_MEAN, PITCH_STD = {pitch_tensor.mean().item()}, {pitch_tensor.std().item()}") ``` - -## ToDos - - - [ ] Populate *torch_tts* - - [x] Create a new datalayer that can be used interchangeably - - [x] Add TTS models with new dataset - - [ ] Split Lightning away from core - - [x] v0.1 that import checks a lot of lightning - - [ ] Split up code (core, collections, utils) better - - [ ] Enable building *text_normlization* without installing lightning - - [ ] 
Look into how `Serialization` works without hydra diff --git a/scripts/dataset_processing/get_cmudict.sh b/scripts/dataset_processing/tts/cmudict/get_data.sh similarity index 100% rename from scripts/dataset_processing/get_cmudict.sh rename to scripts/dataset_processing/tts/cmudict/get_data.sh diff --git a/scripts/dataset_processing/tts/extract_sup_data.py b/scripts/dataset_processing/tts/extract_sup_data.py new file mode 100644 index 000000000000..3006a0c979bb --- /dev/null +++ b/scripts/dataset_processing/tts/extract_sup_data.py @@ -0,0 +1,51 @@ +import torch +from tqdm import tqdm +from hydra.utils import instantiate +from nemo.core.config import hydra_runner + + +def preprocess_ds_for_fastpitch_align(dataloader): + pitch_list = [] + for batch in tqdm(dataloader, total=len(dataloader)): + tokens, tokens_lengths, audios, audio_lengths, align_prior_matrices, pitches, pitches_lengths = batch + + pitch = pitches.squeeze(0) + pitch_list.append(pitch[pitch != 0]) + + pitch_tensor = torch.cat(pitch_list) + print(f"PITCH_MEAN, PITCH_STD = {pitch_tensor.mean().item()}, {pitch_tensor.std().item()}") + + +def preprocess_ds_for_mixer_tts_x(dataloader): + pitch_list = [] + for batch in tqdm(dataloader, total=len(dataloader)): + tokens, tokens_lengths, audios, audio_lengths, align_prior_matrices, pitches, pitches_lengths, lm_tokens = batch + + pitch = pitches.squeeze(0) + pitch_list.append(pitch[pitch != 0]) + + pitch_tensor = torch.cat(pitch_list) + print(f"PITCH_MEAN, PITCH_STD = {pitch_tensor.mean().item()}, {pitch_tensor.std().item()}") + +CFG_NAME2FUNC = { + "ds_for_fastpitch_align": preprocess_ds_for_fastpitch_align, + "ds_for_mixer_tts": preprocess_ds_for_fastpitch_align, + "ds_for_mixer_tts_x": preprocess_ds_for_mixer_tts_x, +} + +@hydra_runner(config_path='conf/ljspeech', config_name='ds_for_fastpitch_align') +def main(cfg): + dataset = instantiate(cfg.dataset) + dataloader = torch.utils.data.DataLoader( + dataset=dataset, + batch_size=1, + collate_fn=dataset._collate_fn, + num_workers=4 + ) + + print(f"Processing {cfg.manifest_filepath}:") + CFG_NAME2FUNC[cfg.name](dataloader) + + +if __name__ == '__main__': + main() # noqa pylint: disable=no-value-for-parameter diff --git a/scripts/dataset_processing/tts/hui_acg/get_data.py b/scripts/dataset_processing/tts/hui_acg/get_data.py new file mode 100644 index 000000000000..15f25ea2f591 --- /dev/null +++ b/scripts/dataset_processing/tts/hui_acg/get_data.py @@ -0,0 +1,162 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
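The script introduced here ends by writing NeMo-style JSON-lines manifests (train/val/test): one JSON object per line carrying the audio path, duration, raw text, and normalized text, matching the `entry` dict built in `__process_data` below. A minimal reader for that format (an illustrative sketch; the manifest path in the commented usage is hypothetical):

import json

def read_manifest(path):
    # Each line is one utterance, e.g.:
    # {"audio_filepath": "...", "duration": 3.2,
    #  "text": "...", "normalized_text": "..."}
    with open(path, encoding="utf-8") as f:
        return [json.loads(line) for line in f if line.strip()]

# entries = read_manifest("HUI-Audio-Corpus-German/train_manifest.json")
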
+ +import argparse +import random +import json +import shutil +import urllib.request +from pathlib import Path +from tqdm import tqdm + +from nemo_text_processing.text_normalization.normalize import Normalizer + + +def get_args(): + parser = argparse.ArgumentParser(description='Download HUI-Audio-Corpus-German and create manifests with predefined split') + + parser.add_argument("--data-root", required=True, type=Path) + parser.add_argument("--speaker", default="Karlsson", choices=["Bernd_Ungerer", "Hokuspokus", "Friedrich", "Karlsson", "Eva_K"], type=str) + parser.add_argument("--set-type", default="clean", choices=["full", "clean"], type=str) + + parser.add_argument("--min-duration", default=0.1, type=float) + parser.add_argument("--max-duration", default=15, type=float) + parser.add_argument("--val-size", default=100, type=int) + parser.add_argument("--test-size", default=200, type=int) + parser.add_argument("--seed-for-ds-split", default=100, type=float, help="Seed for deterministic split of train/dev/test, NVIDIA's default is 100") + + + args = parser.parse_args() + return args + +URLS_FULL = { + 'Bernd_Ungerer': "https://opendata.iisys.de/systemintegration/Datasets/HUI-Audio-Corpus-German/dataset_full/Bernd_Ungerer.zip", + 'Hokuspokus': "https://opendata.iisys.de/systemintegration/Datasets/HUI-Audio-Corpus-German/dataset_full/Hokuspokus.zip", + 'Friedrich': "https://opendata.iisys.de/systemintegration/Datasets/HUI-Audio-Corpus-German/dataset_full/Friedrich.zip", + 'Karlsson': "https://opendata.iisys.de/systemintegration/Datasets/HUI-Audio-Corpus-German/dataset_full/Karlsson.zip", + 'Eva_K': "https://opendata.iisys.de/systemintegration/Datasets/HUI-Audio-Corpus-German/dataset_full/Eva_K.zip", +} +URL_STATS_FULL = "https://opendata.iisys.de/systemintegration/Datasets/HUI-Audio-Corpus-German/datasetStatistic.zip" + +URLS_CLEAN = { + 'Bernd_Ungerer': "https://opendata.iisys.de/systemintegration/Datasets/HUI-Audio-Corpus-German/dataset_clean/Bernd_Ungerer_Clean.zip", + 'Hokuspokus': "https://opendata.iisys.de/systemintegration/Datasets/HUI-Audio-Corpus-German/dataset_clean/Hokuspokus_Clean.zip", + 'Friedrich': "https://opendata.iisys.de/systemintegration/Datasets/HUI-Audio-Corpus-German/dataset_clean/Friedrich_Clean.zip", + 'Karlsson': "https://opendata.iisys.de/systemintegration/Datasets/HUI-Audio-Corpus-German/dataset_clean/Karlsson_Clean.zip", + 'Eva_K': "https://opendata.iisys.de/systemintegration/Datasets/HUI-Audio-Corpus-German/dataset_clean/Eva_K_Clean.zip", +} +URL_STATS_CLEAN = "https://opendata.iisys.de/systemintegration/Datasets/HUI-Audio-Corpus-German/datasetStatisticClean.zip" + +def __maybe_download_file(source_url, destination_path): + if not destination_path.exists(): + tmp_file_path = destination_path.with_suffix('.tmp') + urllib.request.urlretrieve(source_url, filename=str(tmp_file_path)) + tmp_file_path.rename(destination_path) + + +def __extract_file(filepath, data_dir): + try: + shutil.unpack_archive(filepath, data_dir) + except Exception: + print(f"Error while extracting {filepath}. 
Already extracted?") + + +def __process_data(dataset_path, stat_path, min_duration, max_duration, val_size, test_size, seed_for_ds_split): + # Create normalizer + text_normalizer = Normalizer( + lang="de", + input_case="cased", + overwrite_cache=True, + cache_dir=str(dataset_path / "cache_dir"), + ) + text_normalizer_call_kwargs = {"punct_pre_process": True, "punct_post_process": True} + normalizer_call = lambda x: text_normalizer.normalize(x, **text_normalizer_call_kwargs) + + entries = [] + with open(stat_path) as f: + # Let's skip the header + f.readline() + for line in tqdm(f): + file_stem, duration, *_, text = line.strip().split("|") + duration = float(duration) + + # file_stem -> dir_name (e.g. maerchen_01_f000051 -> maerchen, ber_psychoanalyse_01_f000046 -> ber_psychoanalyse) + dir_name = "_".join(file_stem.split("_")[:-2]) + audio_path = dataset_path / dir_name / "wavs" / f"{file_stem}.wav" + + if min_duration <= duration <= max_duration: + normalized_text = normalizer_call(text) + entry = { + 'audio_filepath': str(audio_path), + 'duration': duration, + 'text': text, + 'normalized_text': normalized_text + } + entries.append(entry) + + random.Random(seed_for_ds_split).shuffle(entries) + train_size = len(entries) - val_size - test_size + + assert train_size > 0, "Not enough data for train, val and test" + + def save(p, data): + with open(p, 'w') as f: + for d in data: + f.write(json.dumps(d) + '\n') + + save(dataset_path / "train_manifest.json", entries[:train_size]) + save(dataset_path / "val_manifest.json", entries[train_size:train_size + val_size]) + save(dataset_path / "test_manifest.json", entries[train_size + val_size:]) + + +def main(): + args = get_args() + + speaker = args.speaker + set_type = args.set_type + + dataset_root = args.data_root / "HUI-Audio-Corpus-German" + dataset_root.mkdir(parents=True, exist_ok=True) + + speaker_data_source = URLS_FULL[speaker] if set_type == "full" else URLS_CLEAN[speaker] + stats_source = URL_STATS_FULL if set_type == "full" else URL_STATS_CLEAN + + zipped_speaker_data_path = dataset_root / Path(speaker_data_source).name + zipped_stats_path = dataset_root / Path(stats_source).name + + __maybe_download_file(speaker_data_source, zipped_speaker_data_path) + __maybe_download_file(stats_source, zipped_stats_path) + + __extract_file(zipped_speaker_data_path, dataset_root) + __extract_file(zipped_stats_path, dataset_root) + + # Rename unzipped speaker data folder which has `speaker` name to `Path(speaker_data_source).stem` to avoid name conflicts between full and clean + speaker_data_path = dataset_root / speaker + speaker_data_path = speaker_data_path.rename(dataset_root / Path(speaker_data_source).stem) + + stats_path = dataset_root / Path(stats_source).stem / "speacker" / speaker / "overview.csv" + + __process_data( + speaker_data_path, + stats_path, + args.min_duration, + args.max_duration, + args.val_size, + args.test_size, + args.seed_for_ds_split + ) + + +if __name__ == "__main__": + main() diff --git a/scripts/dataset_processing/tts/ljspeech/ds_conf/ds_for_fastpitch_align.yaml b/scripts/dataset_processing/tts/ljspeech/ds_conf/ds_for_fastpitch_align.yaml new file mode 100644 index 000000000000..4d2e284a9cfe --- /dev/null +++ b/scripts/dataset_processing/tts/ljspeech/ds_conf/ds_for_fastpitch_align.yaml @@ -0,0 +1,51 @@ +name: "ds_for_fastpitch_align" + +manifest_filepath: "train_manifest.json" +sup_data_path: "sup_data" +sup_data_types: [ "align_prior_matrix", "pitch" ] +whitelist_path: 
"nemo_text_processing/text_normalization/en/data/whitelist_lj_speech.tsv" +phoneme_dict_path: "scripts/tts_dataset_files/cmudict-0.7b" +heteronyms_path: "scripts/tts_dataset_files/heteronyms-030921" + +dataset: + _target_: nemo.collections.tts.torch.data.TTSDataset + manifest_filepath: ${manifest_filepath} + sample_rate: 22050 + sup_data_path: ${sup_data_path} + sup_data_types: ${sup_data_types} + n_fft: 1024 + win_length: 1024 + hop_length: 256 + window: "hann" + n_mels: 80 + lowfreq: 0 + highfreq: 8000 + max_duration: null + min_duration: 0.1 + ignore_file: null + trim: false + pitch_fmin: 65.40639132514966 + pitch_fmax: 2093.004522404789 + + text_normalizer: + _target_: nemo_text_processing.text_normalization.normalize.Normalizer + lang: en + input_case: cased + whitelist: ${whitelist_path} + + text_normalizer_call_kwargs: + verbose: false + punct_pre_process: true + punct_post_process: true + + text_tokenizer: + _target_: nemo.collections.tts.torch.tts_tokenizers.EnglishPhonemesTokenizer + punct: true + stresses: true + chars: true + apostrophe: true + pad_with_space: true + g2p: + _target_: nemo.collections.tts.torch.g2ps.EnglishG2p + phoneme_dict: ${phoneme_dict_path} + heteronyms: ${heteronyms_path} diff --git a/scripts/dataset_processing/tts/ljspeech/ds_conf/ds_for_mixer_tts.yaml b/scripts/dataset_processing/tts/ljspeech/ds_conf/ds_for_mixer_tts.yaml new file mode 100644 index 000000000000..d43ac57c9bd5 --- /dev/null +++ b/scripts/dataset_processing/tts/ljspeech/ds_conf/ds_for_mixer_tts.yaml @@ -0,0 +1,51 @@ +name: "ds_for_mixer_tts" + +manifest_filepath: "train_manifest.json" +sup_data_path: "sup_data" +sup_data_types: [ "align_prior_matrix", "pitch" ] +whitelist_path: "nemo_text_processing/text_normalization/en/data/whitelist_lj_speech.tsv" +phoneme_dict_path: "scripts/tts_dataset_files/cmudict-0.7b" +heteronyms_path: "scripts/tts_dataset_files/heteronyms-030921" + +dataset: + _target_: nemo.collections.tts.torch.data.TTSDataset + manifest_filepath: ${manifest_filepath} + sample_rate: 22050 + sup_data_path: ${sup_data_path} + sup_data_types: ${sup_data_types} + n_fft: 1024 + win_length: 1024 + hop_length: 256 + window: "hann" + n_mels: 80 + lowfreq: 0 + highfreq: 8000 + max_duration: null + min_duration: 0.1 + ignore_file: null + trim: false + pitch_fmin: 65.40639132514966 + pitch_fmax: 2093.004522404789 + + text_normalizer: + _target_: nemo_text_processing.text_normalization.normalize.Normalizer + lang: en + input_case: cased + whitelist: ${whitelist_path} + + text_normalizer_call_kwargs: + verbose: false + punct_pre_process: true + punct_post_process: true + + text_tokenizer: + _target_: nemo.collections.tts.torch.tts_tokenizers.EnglishPhonemesTokenizer + punct: true + stresses: true + chars: true + apostrophe: true + pad_with_space: true + g2p: + _target_: nemo.collections.tts.torch.g2ps.EnglishG2p + phoneme_dict: ${phoneme_dict_path} + heteronyms: ${heteronyms_path} diff --git a/scripts/dataset_processing/tts/ljspeech/ds_conf/ds_for_mixer_tts_x.yaml b/scripts/dataset_processing/tts/ljspeech/ds_conf/ds_for_mixer_tts_x.yaml new file mode 100644 index 000000000000..59b2ba72a62e --- /dev/null +++ b/scripts/dataset_processing/tts/ljspeech/ds_conf/ds_for_mixer_tts_x.yaml @@ -0,0 +1,45 @@ +name: "ds_for_mixer_tts_x" + +manifest_filepath: "train_manifest.json" +sup_data_path: "sup_data" +sup_data_types: [ "align_prior_matrix", "pitch", "lm_tokens" ] +whitelist_path: "nemo_text_processing/text_normalization/en/data/whitelist_lj_speech.tsv" + +dataset: + _target_: 
nemo.collections.tts.torch.data.MixerTTSDataset + manifest_filepath: ${manifest_filepath} + sample_rate: 22050 + sup_data_path: ${sup_data_path} + sup_data_types: ${sup_data_types} + n_fft: 1024 + win_length: 1024 + hop_length: 256 + window: "hann" + n_mels: 80 + lowfreq: 0 + highfreq: 8000 + max_duration: null + min_duration: 0.1 + ignore_file: null + trim: false + pitch_fmin: 65.40639132514966 + pitch_fmax: 2093.004522404789 + lm_model: "albert" + + text_normalizer: + _target_: nemo_text_processing.text_normalization.normalize.Normalizer + lang: en + input_case: cased + whitelist: ${whitelist_path} + + text_normalizer_call_kwargs: + verbose: false + punct_pre_process: true + punct_post_process: true + + text_tokenizer: + _target_: nemo.collections.tts.torch.tts_tokenizers.EnglishCharsTokenizer + punct: true + apostrophe: true + pad_with_space: true + diff --git a/scripts/dataset_processing/tts/ljspeech/get_data.py b/scripts/dataset_processing/tts/ljspeech/get_data.py new file mode 100644 index 000000000000..32d38f9d15cd --- /dev/null +++ b/scripts/dataset_processing/tts/ljspeech/get_data.py @@ -0,0 +1,121 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import urllib.request +import json + +import sox +import tarfile +import wget +from pathlib import Path + +from nemo_text_processing.text_normalization.normalize import Normalizer + +def get_args(): + parser = argparse.ArgumentParser(description='Download LJSpeech and create manifests with predefined split') + parser.add_argument("--data-root", required=True, type=Path) + parser.add_argument('--whitelist-path', type=str, default=None) + + args = parser.parse_args() + return args + + +URL = "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2" +FILELIST_BASE = 'https://raw.githubusercontent.com/NVIDIA/tacotron2/master/filelists' + + +def __maybe_download_file(source_url, destination_path): + if not destination_path.exists(): + tmp_file_path = destination_path.with_suffix('.tmp') + urllib.request.urlretrieve(source_url, filename=str(tmp_file_path)) + tmp_file_path.rename(destination_path) + + +def __extract_file(filepath, data_dir): + try: + tar = tarfile.open(filepath) + tar.extractall(data_dir) + tar.close() + except Exception: + print(f"Error while extracting {filepath}. 
Already extracted?") + + +def __process_data(data_root, whitelist_path): + if whitelist_path is None: + wget.download( + "https://raw.githubusercontent.com/NVIDIA/NeMo/main/nemo_text_processing/text_normalization/en/data/whitelist_lj_speech.tsv", + out=data_root, + ) + whitelist_path = data_root / "whitelist_lj_speech.tsv" + + text_normalizer = Normalizer( + lang="en", + input_case="cased", + whitelist=whitelist_path, + overwrite_cache=True, + cache_dir=data_root / "cache_dir", + ) + text_normalizer_call_kwargs = {"punct_pre_process": True, "punct_post_process": True} + normalizer_call = lambda x: text_normalizer.normalize(x, **text_normalizer_call_kwargs) + + # Create manifests (based on predefined NVIDIA's split) + filelists = ['train', 'val', 'test'] + for split in filelists: + # Download file list if necessary + filelist_path = data_root / f"ljs_audio_text_{split}_filelist.txt" + + if not filelist_path.exists(): + wget.download(f"{FILELIST_BASE}/ljs_audio_text_{split}_filelist.txt", out=data_root) + + manifest_target = data_root / f"{split}_manifest.json" + with open(manifest_target, 'w') as f_out: + with open(filelist_path, 'r') as filelist: + print(f"\nCreating {manifest_target}...") + for line in filelist: + basename = line[6:16] + + text = line[21:].strip() + norm_text = normalizer_call(text) + + # Make sure corresponding wavfile exists + wav_path = data_root / 'wavs' / f"{basename}.wav" + assert wav_path.exists(), f"{wav_path} does not exist!" + + entry = { + 'audio_filepath': wav_path, + 'duration': sox.file_info.duration(wav_path), + 'text': text, + 'normalized_text': norm_text, + } + + f_out.write(json.dumps(entry) + '\n') + + +def main(): + args = get_args() + + tarred_data_path = args.data_root / "LJSpeech-1.1.tar.bz2" + + __maybe_download_file(URL, tarred_data_path) + __extract_file(str(tarred_data_path), str(args.data_root)) + + data_root = args.data_root / "LJSpeech-1.1" + whitelist_path = args.whitelist_path + + __process_data(data_root, whitelist_path) + + +if __name__ == '__main__': + main() From b37f56e713edc43eaf9e4b534f3e6257dccce5a6 Mon Sep 17 00:00:00 2001 From: Oktai Tatanov Date: Mon, 24 Jan 2022 13:15:25 +0300 Subject: [PATCH 047/244] remove cmudict downloading Signed-off-by: Oktai Tatanov --- nemo/collections/tts/torch/g2ps.py | 3 ++- scripts/dataset_processing/tts/cmudict/get_data.sh | 7 ------- 2 files changed, 2 insertions(+), 8 deletions(-) delete mode 100644 scripts/dataset_processing/tts/cmudict/get_data.sh diff --git a/nemo/collections/tts/torch/g2ps.py b/nemo/collections/tts/torch/g2ps.py index cdd664af0891..ac9e59d09f87 100644 --- a/nemo/collections/tts/torch/g2ps.py +++ b/nemo/collections/tts/torch/g2ps.py @@ -127,7 +127,8 @@ def _parse_as_cmu_dict(phoneme_dict_path=None, encoding='latin-1'): logging.warning( f"English g2p_dict will be used from nltk.corpus.cmudict.dict(), because phoneme_dict_path=None. " "Note that nltk.corpus.cmudict.dict() has old version (0.6) of CMUDict. " - "You can use the latest official version of CMUDict directly from NeMo using the path scripts/tts_dataset_files/cmudict-0.7b." + "You can use the latest official version of CMUDict (0.7b) with additional changes from NVIDIA directly from NeMo " + "using the path scripts/tts_dataset_files/cmudict-0.7b_nv22.01." 
) return nltk.corpus.cmudict.dict() diff --git a/scripts/dataset_processing/tts/cmudict/get_data.sh b/scripts/dataset_processing/tts/cmudict/get_data.sh deleted file mode 100644 index 212d6a6b2e4a..000000000000 --- a/scripts/dataset_processing/tts/cmudict/get_data.sh +++ /dev/null @@ -1,7 +0,0 @@ -if [ ! -d "$1" ]; then - echo "Error: First argument must be a valid directory. Recommended path is scripts/tts_dataset_files" - exit 1 -fi - -echo "Downloading cmudict-0.7b from official site to $1" -wget http://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict/cmudict-0.7b -P $1 \ No newline at end of file From d2d7c6dcf665b149321a2933e04bff8ef6f69fdd Mon Sep 17 00:00:00 2001 From: Oktai Tatanov Date: Mon, 24 Jan 2022 18:45:38 +0300 Subject: [PATCH 048/244] rename mixertts dataset, add vocoder dataset Signed-off-by: Oktai Tatanov --- examples/tts/conf/mixer-tts-x.yaml | 4 +- examples/tts/conf/mixer-tts.yaml | 4 +- nemo/collections/tts/torch/data.py | 144 +++++++++++++++++- .../tts/extract_sup_data.py | 23 ++- .../tts/hui_acg/get_data.py | 42 +++-- .../ds_conf/ds_for_fastpitch_align.yaml | 2 +- .../ljspeech/ds_conf/ds_for_mixer_tts.yaml | 2 +- .../ljspeech/ds_conf/ds_for_mixer_tts_x.yaml | 2 +- .../tts/ljspeech/get_data.py | 19 +-- 9 files changed, 203 insertions(+), 39 deletions(-) diff --git a/examples/tts/conf/mixer-tts-x.yaml b/examples/tts/conf/mixer-tts-x.yaml index 04130d49fd67..a8bd2b73e6be 100644 --- a/examples/tts/conf/mixer-tts-x.yaml +++ b/examples/tts/conf/mixer-tts-x.yaml @@ -77,7 +77,7 @@ model: train_ds: dataset: - _target_: nemo.collections.tts.torch.data.MixerTTSDataset + _target_: nemo.collections.tts.torch.data.MixerTTSXDataset manifest_filepath: ${train_dataset} sample_rate: ${model.sample_rate} sup_data_path: ${sup_data_path} @@ -106,7 +106,7 @@ model: validation_ds: dataset: - _target_: nemo.collections.tts.torch.data.MixerTTSDataset + _target_: nemo.collections.tts.torch.data.MixerTTSXDataset manifest_filepath: ${validation_datasets} sample_rate: ${model.sample_rate} sup_data_path: ${sup_data_path} diff --git a/examples/tts/conf/mixer-tts.yaml b/examples/tts/conf/mixer-tts.yaml index 0f6d2eac71d3..945eebf3efda 100644 --- a/examples/tts/conf/mixer-tts.yaml +++ b/examples/tts/conf/mixer-tts.yaml @@ -81,7 +81,7 @@ model: train_ds: dataset: - _target_: nemo.collections.tts.torch.data.MixerTTSDataset + _target_: nemo.collections.tts.torch.data.TTSDataset manifest_filepath: ${train_dataset} sample_rate: ${model.sample_rate} sup_data_path: ${sup_data_path} @@ -109,7 +109,7 @@ model: validation_ds: dataset: - _target_: nemo.collections.tts.torch.data.MixerTTSDataset + _target_: nemo.collections.tts.torch.data.TTSDataset manifest_filepath: ${validation_datasets} sample_rate: ${model.sample_rate} sup_data_path: ${sup_data_path} diff --git a/nemo/collections/tts/torch/data.py b/nemo/collections/tts/torch/data.py index cf5e74fb0ce2..d84f72e6d71d 100644 --- a/nemo/collections/tts/torch/data.py +++ b/nemo/collections/tts/torch/data.py @@ -14,16 +14,20 @@ import json +import math import pickle from pathlib import Path +from random import random from typing import Callable, Dict, List, Optional, Union import librosa +import numpy as np import torch from nemo_text_processing.text_normalization.normalize import Normalizer from tqdm import tqdm from nemo.collections.asr.parts.preprocessing.features import WaveformFeaturizer +from nemo.collections.asr.parts.preprocessing.segment import AudioSegment from nemo.collections.tts.torch.helpers import ( BetaBinomialInterpolator, 
beta_binomial_prior_distribution, @@ -554,7 +558,7 @@ def _collate_fn(self, batch): return joined_data -class MixerTTSDataset(TTSDataset): +class MixerTTSXDataset(TTSDataset): def __init__(self, **kwargs): super().__init__(**kwargs) @@ -646,3 +650,141 @@ def _collate_fn(self, batch): joined_data = self.join_data(data_dict) return joined_data + + +class VocoderDataset(Dataset): + def __init__( + self, + manifest_filepath: str, + sample_rate: int, + n_segments: Optional[int] = None, + min_duration: Optional[float] = None, + max_duration: Optional[float] = None, + ignore_file: Optional[str] = None, + trim: Optional[bool] = False, + load_precomputed_mel: bool = False, + hop_length: int = 256, + ): + if isinstance(manifest_filepath, str): + manifest_filepath = [manifest_filepath] + self.manifest_filepath = manifest_filepath + + self.data = [] + audio_files = [] + total_duration = 0 + for manifest_file in self.manifest_filepath: + with open(Path(manifest_file).expanduser(), 'r') as f: + logging.info(f"Loading dataset from {manifest_file}.") + for line in tqdm(f): + item = json.loads(line) + + if "mel_filepath" not in item and load_precomputed_mel: + raise ValueError(f"mel_filepath is missing in {manifest_file}") + + file_info = { + "audio_filepath": item["audio_filepath"], + "mel_filepath": item["mel_filepath"] if "mel_filepath" in item else None, + "duration": item["duration"] if "duration" in item else None, + } + + audio_files.append(file_info) + + if file_info["duration"] is None: + logging.info( + "Not all audio files have duration information. Duration logging will be disabled." + ) + total_duration = None + + if total_duration is not None: + total_duration += item["duration"] + + logging.info(f"Loaded dataset with {len(audio_files)} files.") + if total_duration is not None: + logging.info(f"Dataset contains {total_duration / 3600:.2f} hours.") + + if ignore_file: + logging.info(f"using {ignore_file} to prune dataset.") + with open(Path(ignore_file).expanduser(), "rb") as f: + wavs_to_ignore = set(pickle.load(f)) + + pruned_duration = 0 if total_duration is not None else None + pruned_items = 0 + for item in audio_files: + audio_path = item['audio_filepath'] + audio_id = Path(audio_path).stem + + # Prune data according to min/max_duration & the ignore file + if total_duration is not None: + if (min_duration and item["duration"] < min_duration) or ( + max_duration and item["duration"] > max_duration + ): + pruned_duration += item["duration"] + pruned_items += 1 + continue + + if ignore_file and (audio_id in wavs_to_ignore): + pruned_items += 1 + pruned_duration += item["duration"] + wavs_to_ignore.remove(audio_id) + continue + + self.data.append(item) + + logging.info(f"Pruned {pruned_items} files. Final dataset contains {len(self.data)} files") + if pruned_duration is not None: + logging.info( + f"Pruned {pruned_duration / 3600:.2f} hours. Final dataset contains " + f"{(total_duration - pruned_duration) / 3600:.2f} hours." 
+ ) + + self.load_precomputed_mel = load_precomputed_mel + self.featurizer = WaveformFeaturizer(sample_rate=sample_rate) + self.sample_rate = sample_rate + self.n_segments = n_segments + self.hop_length = hop_length + self.trim = trim + + def _collate_fn(self, batch): + if self.load_precomputed_mel: + return torch.utils.data.dataloader.default_collate(batch) + + audio_lengths = [audio_len for _, audio_len in batch] + audio_signal = torch.zeros(len(batch), max(audio_lengths), dtype=torch.float) + + for i, sample in enumerate(batch): + audio_signal[i].narrow(0, 0, sample[0].size(0)).copy_(sample[0]) + + return audio_signal, torch.tensor(audio_lengths, dtype=torch.long) + + def __getitem__(self, index): + sample = self.data[index] + + if not self.load_precomputed_mel: + features = AudioSegment.segment_from_file( + sample["audio_filepath"], + n_segments=self.n_segments if self.n_segments is not None else -1, + trim=self.trim, + ) + features = torch.tensor(features.samples) + audio, audio_length = features, torch.tensor(features.shape[0]).long() + + return audio, audio_length + else: + features = self.featurizer.process(sample["audio_filepath"], trim=self.trim) + audio, audio_length = features, torch.tensor(features.shape[0]).long() + + mel = np.load(sample["mel_filepath"]) + frames = math.ceil(self.n_segments / self.hop_length) + + if len(audio) > self.n_segments: + start = random.randint(0, mel.shape[1] - frames - 2) + mel = mel[:, start : start + frames] + audio = audio[start * self.hop_length : (start + frames) * self.hop_length] + else: + mel = np.pad(mel, ((0, 0), (0, frames - mel.shape[1]))) + audio = torch.nn.functional.pad(audio, (0, self.n_segments - audio.shape[1])) + + return audio, audio.shape[1], mel + + def __len__(self): + return len(self.data) diff --git a/scripts/dataset_processing/tts/extract_sup_data.py b/scripts/dataset_processing/tts/extract_sup_data.py index 3006a0c979bb..f133c393d374 100644 --- a/scripts/dataset_processing/tts/extract_sup_data.py +++ b/scripts/dataset_processing/tts/extract_sup_data.py @@ -1,6 +1,7 @@ import torch -from tqdm import tqdm from hydra.utils import instantiate +from tqdm import tqdm + from nemo.core.config import hydra_runner @@ -19,7 +20,16 @@ def preprocess_ds_for_fastpitch_align(dataloader): def preprocess_ds_for_mixer_tts_x(dataloader): pitch_list = [] for batch in tqdm(dataloader, total=len(dataloader)): - tokens, tokens_lengths, audios, audio_lengths, align_prior_matrices, pitches, pitches_lengths, lm_tokens = batch + ( + tokens, + tokens_lengths, + audios, + audio_lengths, + align_prior_matrices, + pitches, + pitches_lengths, + lm_tokens, + ) = batch pitch = pitches.squeeze(0) pitch_list.append(pitch[pitch != 0]) @@ -27,20 +37,19 @@ def preprocess_ds_for_mixer_tts_x(dataloader): pitch_tensor = torch.cat(pitch_list) print(f"PITCH_MEAN, PITCH_STD = {pitch_tensor.mean().item()}, {pitch_tensor.std().item()}") + CFG_NAME2FUNC = { "ds_for_fastpitch_align": preprocess_ds_for_fastpitch_align, "ds_for_mixer_tts": preprocess_ds_for_fastpitch_align, "ds_for_mixer_tts_x": preprocess_ds_for_mixer_tts_x, } -@hydra_runner(config_path='conf/ljspeech', config_name='ds_for_fastpitch_align') + +@hydra_runner(config_path='ljspeech/ds_conf', config_name='ds_for_fastpitch_align') def main(cfg): dataset = instantiate(cfg.dataset) dataloader = torch.utils.data.DataLoader( - dataset=dataset, - batch_size=1, - collate_fn=dataset._collate_fn, - num_workers=4 + dataset=dataset, batch_size=1, collate_fn=dataset._collate_fn, num_workers=4 ) print(f"Processing 
{cfg.manifest_filepath}:") diff --git a/scripts/dataset_processing/tts/hui_acg/get_data.py b/scripts/dataset_processing/tts/hui_acg/get_data.py index 15f25ea2f591..dcae93eeeaa4 100644 --- a/scripts/dataset_processing/tts/hui_acg/get_data.py +++ b/scripts/dataset_processing/tts/hui_acg/get_data.py @@ -13,33 +13,45 @@ # limitations under the License. import argparse -import random import json +import random import shutil import urllib.request from pathlib import Path -from tqdm import tqdm from nemo_text_processing.text_normalization.normalize import Normalizer +from tqdm import tqdm def get_args(): - parser = argparse.ArgumentParser(description='Download HUI-Audio-Corpus-German and create manifests with predefined split') + parser = argparse.ArgumentParser( + description='Download HUI-Audio-Corpus-German and create manifests with predefined split' + ) parser.add_argument("--data-root", required=True, type=Path) - parser.add_argument("--speaker", default="Karlsson", choices=["Bernd_Ungerer", "Hokuspokus", "Friedrich", "Karlsson", "Eva_K"], type=str) + parser.add_argument( + "--speaker", + default="Karlsson", + choices=["Bernd_Ungerer", "Hokuspokus", "Friedrich", "Karlsson", "Eva_K"], + type=str, + ) parser.add_argument("--set-type", default="clean", choices=["full", "clean"], type=str) parser.add_argument("--min-duration", default=0.1, type=float) parser.add_argument("--max-duration", default=15, type=float) parser.add_argument("--val-size", default=100, type=int) parser.add_argument("--test-size", default=200, type=int) - parser.add_argument("--seed-for-ds-split", default=100, type=float, help="Seed for deterministic split of train/dev/test, NVIDIA's default is 100") - + parser.add_argument( + "--seed-for-ds-split", + default=100, + type=float, + help="Seed for deterministic split of train/dev/test, NVIDIA's default is 100", + ) args = parser.parse_args() return args + URLS_FULL = { 'Bernd_Ungerer': "https://opendata.iisys.de/systemintegration/Datasets/HUI-Audio-Corpus-German/dataset_full/Bernd_Ungerer.zip", 'Hokuspokus': "https://opendata.iisys.de/systemintegration/Datasets/HUI-Audio-Corpus-German/dataset_full/Hokuspokus.zip", @@ -56,7 +68,10 @@ def get_args(): 'Karlsson': "https://opendata.iisys.de/systemintegration/Datasets/HUI-Audio-Corpus-German/dataset_clean/Karlsson_Clean.zip", 'Eva_K': "https://opendata.iisys.de/systemintegration/Datasets/HUI-Audio-Corpus-German/dataset_clean/Eva_K_Clean.zip", } -URL_STATS_CLEAN = "https://opendata.iisys.de/systemintegration/Datasets/HUI-Audio-Corpus-German/datasetStatisticClean.zip" +URL_STATS_CLEAN = ( + "https://opendata.iisys.de/systemintegration/Datasets/HUI-Audio-Corpus-German/datasetStatisticClean.zip" +) + def __maybe_download_file(source_url, destination_path): if not destination_path.exists(): @@ -75,10 +90,7 @@ def __extract_file(filepath, data_dir): def __process_data(dataset_path, stat_path, min_duration, max_duration, val_size, test_size, seed_for_ds_split): # Create normalizer text_normalizer = Normalizer( - lang="de", - input_case="cased", - overwrite_cache=True, - cache_dir=str(dataset_path / "cache_dir"), + lang="de", input_case="cased", overwrite_cache=True, cache_dir=str(dataset_path / "cache_dir"), ) text_normalizer_call_kwargs = {"punct_pre_process": True, "punct_post_process": True} normalizer_call = lambda x: text_normalizer.normalize(x, **text_normalizer_call_kwargs) @@ -101,7 +113,7 @@ def __process_data(dataset_path, stat_path, min_duration, max_duration, val_size 'audio_filepath': str(audio_path), 'duration': duration, 
'text': text, - 'normalized_text': normalized_text + 'normalized_text': normalized_text, } entries.append(entry) @@ -116,8 +128,8 @@ def save(p, data): f.write(json.dumps(d) + '\n') save(dataset_path / "train_manifest.json", entries[:train_size]) - save(dataset_path / "val_manifest.json", entries[train_size:train_size + val_size]) - save(dataset_path / "test_manifest.json", entries[train_size + val_size:]) + save(dataset_path / "val_manifest.json", entries[train_size : train_size + val_size]) + save(dataset_path / "test_manifest.json", entries[train_size + val_size :]) def main(): @@ -154,7 +166,7 @@ def main(): args.max_duration, args.val_size, args.test_size, - args.seed_for_ds_split + args.seed_for_ds_split, ) diff --git a/scripts/dataset_processing/tts/ljspeech/ds_conf/ds_for_fastpitch_align.yaml b/scripts/dataset_processing/tts/ljspeech/ds_conf/ds_for_fastpitch_align.yaml index 4d2e284a9cfe..4bb6aff78a56 100644 --- a/scripts/dataset_processing/tts/ljspeech/ds_conf/ds_for_fastpitch_align.yaml +++ b/scripts/dataset_processing/tts/ljspeech/ds_conf/ds_for_fastpitch_align.yaml @@ -4,7 +4,7 @@ manifest_filepath: "train_manifest.json" sup_data_path: "sup_data" sup_data_types: [ "align_prior_matrix", "pitch" ] whitelist_path: "nemo_text_processing/text_normalization/en/data/whitelist_lj_speech.tsv" -phoneme_dict_path: "scripts/tts_dataset_files/cmudict-0.7b" +phoneme_dict_path: "scripts/tts_dataset_files/cmudict-0.7b_nv22.01" heteronyms_path: "scripts/tts_dataset_files/heteronyms-030921" dataset: diff --git a/scripts/dataset_processing/tts/ljspeech/ds_conf/ds_for_mixer_tts.yaml b/scripts/dataset_processing/tts/ljspeech/ds_conf/ds_for_mixer_tts.yaml index d43ac57c9bd5..abbaca477a45 100644 --- a/scripts/dataset_processing/tts/ljspeech/ds_conf/ds_for_mixer_tts.yaml +++ b/scripts/dataset_processing/tts/ljspeech/ds_conf/ds_for_mixer_tts.yaml @@ -4,7 +4,7 @@ manifest_filepath: "train_manifest.json" sup_data_path: "sup_data" sup_data_types: [ "align_prior_matrix", "pitch" ] whitelist_path: "nemo_text_processing/text_normalization/en/data/whitelist_lj_speech.tsv" -phoneme_dict_path: "scripts/tts_dataset_files/cmudict-0.7b" +phoneme_dict_path: "scripts/tts_dataset_files/cmudict-0.7b_nv22.01" heteronyms_path: "scripts/tts_dataset_files/heteronyms-030921" dataset: diff --git a/scripts/dataset_processing/tts/ljspeech/ds_conf/ds_for_mixer_tts_x.yaml b/scripts/dataset_processing/tts/ljspeech/ds_conf/ds_for_mixer_tts_x.yaml index 59b2ba72a62e..03832d92574d 100644 --- a/scripts/dataset_processing/tts/ljspeech/ds_conf/ds_for_mixer_tts_x.yaml +++ b/scripts/dataset_processing/tts/ljspeech/ds_conf/ds_for_mixer_tts_x.yaml @@ -6,7 +6,7 @@ sup_data_types: [ "align_prior_matrix", "pitch", "lm_tokens" ] whitelist_path: "nemo_text_processing/text_normalization/en/data/whitelist_lj_speech.tsv" dataset: - _target_: nemo.collections.tts.torch.data.MixerTTSDataset + _target_: nemo.collections.tts.torch.data.MixerTTSXDataset manifest_filepath: ${manifest_filepath} sample_rate: 22050 sup_data_path: ${sup_data_path} diff --git a/scripts/dataset_processing/tts/ljspeech/get_data.py b/scripts/dataset_processing/tts/ljspeech/get_data.py index 32d38f9d15cd..1f9ee2d8b71e 100644 --- a/scripts/dataset_processing/tts/ljspeech/get_data.py +++ b/scripts/dataset_processing/tts/ljspeech/get_data.py @@ -13,15 +13,16 @@ # limitations under the License. 
import argparse -import urllib.request import json - -import sox import tarfile -import wget +import urllib.request from pathlib import Path +import sox +import wget from nemo_text_processing.text_normalization.normalize import Normalizer +from tqdm import tqdm + def get_args(): parser = argparse.ArgumentParser(description='Download LJSpeech and create manifests with predefined split') @@ -56,7 +57,7 @@ def __process_data(data_root, whitelist_path): if whitelist_path is None: wget.download( "https://raw.githubusercontent.com/NVIDIA/NeMo/main/nemo_text_processing/text_normalization/en/data/whitelist_lj_speech.tsv", - out=data_root, + out=str(data_root), ) whitelist_path = data_root / "whitelist_lj_speech.tsv" @@ -72,18 +73,18 @@ def __process_data(data_root, whitelist_path): # Create manifests (based on predefined NVIDIA's split) filelists = ['train', 'val', 'test'] - for split in filelists: + for split in tqdm(filelists): # Download file list if necessary filelist_path = data_root / f"ljs_audio_text_{split}_filelist.txt" if not filelist_path.exists(): - wget.download(f"{FILELIST_BASE}/ljs_audio_text_{split}_filelist.txt", out=data_root) + wget.download(f"{FILELIST_BASE}/ljs_audio_text_{split}_filelist.txt", out=str(data_root)) manifest_target = data_root / f"{split}_manifest.json" with open(manifest_target, 'w') as f_out: with open(filelist_path, 'r') as filelist: print(f"\nCreating {manifest_target}...") - for line in filelist: + for line in tqdm(filelist): basename = line[6:16] text = line[21:].strip() @@ -94,7 +95,7 @@ def __process_data(data_root, whitelist_path): assert wav_path.exists(), f"{wav_path} does not exist!" entry = { - 'audio_filepath': wav_path, + 'audio_filepath': str(wav_path), 'duration': sox.file_info.duration(wav_path), 'text': text, 'normalized_text': norm_text, From 8164da451a2428bf88c1fae9832debf2e74f28c6 Mon Sep 17 00:00:00 2001 From: Oktai Tatanov Date: Mon, 24 Jan 2022 19:40:56 +0300 Subject: [PATCH 049/244] add libritts processing Signed-off-by: Oktai Tatanov --- .../tts/extract_sup_data.py | 15 +++ .../libritts/get_data.py} | 102 ++++++------------ 2 files changed, 46 insertions(+), 71 deletions(-) rename scripts/dataset_processing/{get_libritts_data.py => tts/libritts/get_data.py} (51%) diff --git a/scripts/dataset_processing/tts/extract_sup_data.py b/scripts/dataset_processing/tts/extract_sup_data.py index f133c393d374..c86da957bc77 100644 --- a/scripts/dataset_processing/tts/extract_sup_data.py +++ b/scripts/dataset_processing/tts/extract_sup_data.py @@ -1,3 +1,18 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
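[Editor's aside before the LibriTTS script below, on the VocoderDataset introduced two patches earlier: it follows the same manifest-driven pattern as TTSDataset but yields raw audio segments (plus precomputed mels when load_precomputed_mel=True). A minimal usage sketch, with constructor arguments taken from the class definition above and the crop length (8192 samples) borrowed from segment_size in the VITS config later in this series; the manifest path is hypothetical:

import torch
from nemo.collections.tts.torch.data import VocoderDataset

ds = VocoderDataset(
    manifest_filepath="train_manifest.json",  # any NeMo-style manifest with audio_filepath/duration
    sample_rate=22050,
    n_segments=8192,  # length of the random crop, in samples
    min_duration=0.1,
)
loader = torch.utils.data.DataLoader(ds, batch_size=16, collate_fn=ds._collate_fn, num_workers=4)
audio, audio_lens = next(iter(loader))  # zero-padded [B, T] float batch and per-item lengths

The precomputed-mel branch of __getitem__ still has the random/import and 1-D padding bugs at this point in the series; they are fixed in the "fix bugs in vocoder ds" patch below, so the sketch deliberately exercises only the raw-audio path.]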
+ + import torch from hydra.utils import instantiate from tqdm import tqdm diff --git a/scripts/dataset_processing/get_libritts_data.py b/scripts/dataset_processing/tts/libritts/get_data.py similarity index 51% rename from scripts/dataset_processing/get_libritts_data.py rename to scripts/dataset_processing/tts/libritts/get_data.py index 18edb26853ff..955ba3bc64e5 100644 --- a/scripts/dataset_processing/get_libritts_data.py +++ b/scripts/dataset_processing/tts/libritts/get_data.py @@ -1,4 +1,4 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,30 +12,28 @@ # See the License for the specific language governing permissions and # limitations under the License. # -# USAGE: python get_libritts_data.py --data_root= -# --data_set= --num_workers= +# USAGE: python get_data.py --data-root= --data-set= --num-workers= # where can be: dev_clean, dev_other, test_clean, # test_other, train_clean_100, train_clean_360, train_other_500 or ALL # You can also put more than one data_set comma-separated: -# --data_set=dev_clean,train_clean_100 +# --data-set=dev_clean,train_clean_100 import argparse import fnmatch import functools import json -import logging import multiprocessing import os import subprocess import tarfile import urllib.request +from pathlib import Path from tqdm import tqdm -parser = argparse.ArgumentParser(description='LibriTTS Data download') -parser.add_argument("--data_root", required=True, default=None, type=str) -parser.add_argument("--data_sets", default="dev_clean", type=str) -parser.add_argument("--num_workers", default=4, type=int) -parser.add_argument('--log', dest='log', action='store_true', default=False) +parser = argparse.ArgumentParser(description='Download LibriTTS and create manifests') +parser.add_argument("--data-root", required=True, type=Path) +parser.add_argument("--data-sets", default="dev_clean", type=str) +parser.add_argument("--num-workers", default=4, type=int) args = parser.parse_args() URLS = { @@ -49,71 +47,40 @@ } -def __maybe_download_file(destination: str, source: str): - """ - Downloads source to destination if it doesn't exist. - If exists, skips download - Args: - destination: local filepath - source: url of resource - Returns: - """ - source = URLS[source] - if not os.path.exists(destination): - logging.info("{0} does not exist. Downloading ...".format(destination)) - urllib.request.urlretrieve(source, filename=destination + '.tmp') - os.rename(destination + '.tmp', destination) - logging.info("Downloaded {0}.".format(destination)) - else: - logging.info("Destination {0} exists. Skipping.".format(destination)) - return destination - - -def __extract_file(filepath: str, data_dir: str): +def __maybe_download_file(source_url, destination_path): + if not destination_path.exists(): + tmp_file_path = destination_path.with_suffix('.tmp') + urllib.request.urlretrieve(source_url, filename=str(tmp_file_path)) + tmp_file_path.rename(destination_path) + + +def __extract_file(filepath, data_dir): try: tar = tarfile.open(filepath) tar.extractall(data_dir) tar.close() except Exception: - logging.info('Not extracting. Maybe already there?') + print(f"Error while extracting {filepath}. Already extracted?") def __process_transcript(file_path: str): - """ - Loads wav from a given transcript, capturing the metadata. 
- Args: - file_path: path to a source transcript ending with .normalized.txt - Returns: - a list of metadata entries for processed files. - """ entries = [] with open(file_path, encoding="utf-8") as fin: text = fin.readlines()[0] - transcript_text = text.lower().strip() + + # TODO(oktai15): add normalized text via Normalizer/NormalizerWithAudio wav_file = file_path.replace(".normalized.txt", ".wav") - assert os.path.exists(wav_file), "{} not found!".format(wav_file) - # check duration - duration = subprocess.check_output("soxi -D {0}".format(wav_file), shell=True) - - entry = {} - entry['audio_filepath'] = os.path.abspath(wav_file) - entry['duration'] = float(duration) - entry['text'] = transcript_text + assert os.path.exists(wav_file), f"{wav_file} not found!" + duration = subprocess.check_output(f"soxi -D {wav_file}", shell=True) + transcript_text = text.lower().strip() + entry = {'audio_filepath': os.path.abspath(wav_file), 'duration': float(duration), 'text': transcript_text} + entries.append(entry) return entries -def __process_data(data_folder: str, manifest_file: str, num_workers: int): - """ - Loads wav and build manifests's json - Args: - data_folder: source with wav files - manifest_file: where to store manifest - num_workers: number of parallel workers processing files - Returns: - """ - +def __process_data(data_folder, manifest_file, num_workers): files = [] entries = [] @@ -138,27 +105,20 @@ def main(): data_sets = args.data_sets num_workers = args.num_workers - if args.log: - logging.basicConfig(level=logging.INFO) - if data_sets == "ALL": data_sets = "dev_clean,dev_other,train_clean_100,train_clean_360,train_other_500,test_clean,test_other" if data_sets == "mini": data_sets = "dev_clean,train_clean_100" for data_set in data_sets.split(','): - logging.info("\n\nWorking on: {0}".format(data_set)) - filepath = os.path.join(data_root, data_set + ".tar.gz") - logging.info("Getting {0}".format(data_set)) - __maybe_download_file(filepath, data_set.upper()) - logging.info("Extracting {0}".format(data_set)) - __extract_file(filepath, data_root) - logging.info("Processing {0}".format(data_set)) + filepath = data_root / f"{data_set}.tar.gz" + __maybe_download_file(URLS[data_set.upper()], filepath) + __extract_file(str(filepath), str(data_root)) + __process_data( - os.path.join(os.path.join(data_root, "LibriTTS"), data_set.replace("_", "-"),), - os.path.join(data_root, "LibriTTS", data_set + ".json"), + str(data_root / "LibriTTS" / data_set.replace("_", "-")), + str(data_root / "LibriTTS" / f"{data_set}.json"), num_workers=num_workers, ) - logging.info('Done!') if __name__ == "__main__": From cd5c041a88398d499f3bb05fa3732f2699948f22 Mon Sep 17 00:00:00 2001 From: Oktai Tatanov Date: Tue, 25 Jan 2022 17:47:48 +0300 Subject: [PATCH 050/244] update tts dataset and libritts get data Signed-off-by: Oktai Tatanov --- nemo/collections/tts/torch/data.py | 28 ++++++++++++------- .../tts/libritts/get_data.py | 11 ++++---- 2 files changed, 23 insertions(+), 16 deletions(-) diff --git a/nemo/collections/tts/torch/data.py b/nemo/collections/tts/torch/data.py index d84f72e6d71d..f360080812d6 100644 --- a/nemo/collections/tts/torch/data.py +++ b/nemo/collections/tts/torch/data.py @@ -337,7 +337,7 @@ def get_log_mel(self, audio): def __getitem__(self, index): sample = self.data[index] - audio_stem = Path(sample["audio_filepath"]).stem + audio_path_as_text_id = sample["audio_filepath"].replace("/", "-").split(".")[0] features = self.featurizer.process(sample["audio_filepath"], trim=self.trim) audio, 
audio_length = features, torch.tensor(features.shape[0]).long() @@ -352,7 +352,10 @@ def __getitem__(self, index): if mel_path is not None and Path(mel_path).exists(): log_mel = torch.load(mel_path) else: - mel_path = Path(self.sup_data_path) / f"mel_{audio_stem}.pt" + mel_folder = Path(self.sup_data_path) / "mel" + mel_folder.mkdir(exist_ok=True, parents=True) + + mel_path = mel_folder / f"mel{audio_path_as_text_id}.pt" if mel_path.exists(): log_mel = torch.load(mel_path) @@ -373,7 +376,10 @@ def __getitem__(self, index): mel_len = self.get_log_mel(audio).shape[2] align_prior_matrix = torch.from_numpy(self.beta_binomial_interpolator(mel_len, text_length.item())) else: - prior_path = Path(self.sup_data_path) / f"pr_{audio_stem}.pt" + prior_folder = Path(self.sup_data_path) / "align_prior_matrix" + prior_folder.mkdir(exist_ok=True, parents=True) + + prior_path = prior_folder / f"prior{audio_path_as_text_id}.pt" if prior_path.exists(): align_prior_matrix = torch.load(prior_path) @@ -385,13 +391,11 @@ def __getitem__(self, index): pitch, pitch_length = None, None if Pitch in self.sup_data_types_set: - pitch_name = ( - f"{audio_stem}_pitch_pyin_" - f"fmin{self.pitch_fmin}_fmax{self.pitch_fmax}_" - f"fl{self.win_length}_hs{self.hop_len}.pt" - ) + pitch_folder = Path(self.sup_data_path) / "pitch" + pitch_folder.mkdir(exist_ok=True, parents=True) + + pitch_path = pitch_folder / f"pitch{audio_path_as_text_id}.pt" - pitch_path = Path(self.sup_data_path) / pitch_name if pitch_path.exists(): pitch = torch.load(pitch_path).float() else: @@ -415,7 +419,11 @@ def __getitem__(self, index): energy, energy_length = None, None if Energy in self.sup_data_types_set: - energy_path = Path(self.sup_data_path) / f"{audio_stem}_energy_wl{self.win_length}_hs{self.hop_len}.pt" + energy_folder = Path(self.sup_data_path) / "energy" + energy_folder.mkdir(exist_ok=True, parents=True) + + energy_path = energy_folder / f"energy{audio_path_as_text_id}.pt" + if energy_path.exists(): energy = torch.load(energy_path).float() else: diff --git a/scripts/dataset_processing/tts/libritts/get_data.py b/scripts/dataset_processing/tts/libritts/get_data.py index 955ba3bc64e5..8ff56a0c68d1 100644 --- a/scripts/dataset_processing/tts/libritts/get_data.py +++ b/scripts/dataset_processing/tts/libritts/get_data.py @@ -37,9 +37,9 @@ args = parser.parse_args() URLS = { - 'TRAIN_CLEAN_100': ("https://www.openslr.org/resources/60/train-clean-100.tar.gz"), - 'TRAIN_CLEAN_360': ("https://www.openslr.org/resources/60/train-clean-360.tar.gz"), - 'TRAIN_OTHER_500': ("https://www.openslr.org/resources/60/train-other-500.tar.gz"), + 'TRAIN_CLEAN_100': "https://www.openslr.org/resources/60/train-clean-100.tar.gz", + 'TRAIN_CLEAN_360': "https://www.openslr.org/resources/60/train-clean-360.tar.gz", + 'TRAIN_OTHER_500': "https://www.openslr.org/resources/60/train-other-500.tar.gz", 'DEV_CLEAN': "https://www.openslr.org/resources/60/dev-clean.tar.gz", 'DEV_OTHER': "https://www.openslr.org/resources/60/dev-other.tar.gz", 'TEST_CLEAN': "https://www.openslr.org/resources/60/test-clean.tar.gz", @@ -66,14 +66,13 @@ def __extract_file(filepath, data_dir): def __process_transcript(file_path: str): entries = [] with open(file_path, encoding="utf-8") as fin: - text = fin.readlines()[0] + text = fin.readlines()[0].strip() # TODO(oktai15): add normalized text via Normalizer/NormalizerWithAudio wav_file = file_path.replace(".normalized.txt", ".wav") assert os.path.exists(wav_file), f"{wav_file} not found!" 
duration = subprocess.check_output(f"soxi -D {wav_file}", shell=True) - transcript_text = text.lower().strip() - entry = {'audio_filepath': os.path.abspath(wav_file), 'duration': float(duration), 'text': transcript_text} + entry = {'audio_filepath': os.path.abspath(wav_file), 'duration': float(duration), 'text': text} entries.append(entry) From a86563737477edf9fee8eb4c8bbad1b7ffdb8f44 Mon Sep 17 00:00:00 2001 From: Oktai Tatanov Date: Tue, 25 Jan 2022 18:35:45 +0300 Subject: [PATCH 051/244] fix bugs in vocoder ds Signed-off-by: Oktai Tatanov --- nemo/collections/tts/torch/data.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/nemo/collections/tts/torch/data.py b/nemo/collections/tts/torch/data.py index f360080812d6..0b9eb0f5a042 100644 --- a/nemo/collections/tts/torch/data.py +++ b/nemo/collections/tts/torch/data.py @@ -16,12 +16,11 @@ import json import math import pickle +import random from pathlib import Path -from random import random from typing import Callable, Dict, List, Optional, Union import librosa -import numpy as np import torch from nemo_text_processing.text_normalization.normalize import Normalizer from tqdm import tqdm @@ -671,12 +670,19 @@ def __init__( ignore_file: Optional[str] = None, trim: Optional[bool] = False, load_precomputed_mel: bool = False, - hop_length: int = 256, + hop_length: Optional[int] = None, ): if isinstance(manifest_filepath, str): manifest_filepath = [manifest_filepath] self.manifest_filepath = manifest_filepath + if load_precomputed_mel: + if hop_length is None: + raise ValueError("hop_length must be specified when load_precomputed_mel is True") + + if n_segments is None: + raise ValueError("n_segments must be specified when load_precomputed_mel is True") + self.data = [] audio_files = [] total_duration = 0 @@ -781,7 +787,7 @@ def __getitem__(self, index): features = self.featurizer.process(sample["audio_filepath"], trim=self.trim) audio, audio_length = features, torch.tensor(features.shape[0]).long() - mel = np.load(sample["mel_filepath"]) + mel = torch.load(sample["mel_filepath"]) frames = math.ceil(self.n_segments / self.hop_length) if len(audio) > self.n_segments: @@ -789,10 +795,10 @@ def __getitem__(self, index): mel = mel[:, start : start + frames] audio = audio[start * self.hop_length : (start + frames) * self.hop_length] else: - mel = np.pad(mel, ((0, 0), (0, frames - mel.shape[1]))) - audio = torch.nn.functional.pad(audio, (0, self.n_segments - audio.shape[1])) + mel = torch.nn.functional.pad(mel, (0, frames - mel.shape[1])) + audio = torch.nn.functional.pad(audio, (0, self.n_segments - len(audio))) - return audio, audio.shape[1], mel + return audio, len(audio), mel def __len__(self): return len(self.data) From 6749a3d91d53dfa92a652d28133486ade270067a Mon Sep 17 00:00:00 2001 From: treacker Date: Wed, 26 Jan 2022 04:36:44 -0800 Subject: [PATCH 052/244] add ds --- scripts/dataset_processing/tts/ljspeech/get_data.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/scripts/dataset_processing/tts/ljspeech/get_data.py b/scripts/dataset_processing/tts/ljspeech/get_data.py index 1f9ee2d8b71e..134a50882b2b 100644 --- a/scripts/dataset_processing/tts/ljspeech/get_data.py +++ b/scripts/dataset_processing/tts/ljspeech/get_data.py @@ -20,6 +20,10 @@ import sox import wget + +import sys +# sys.path.append("/nemo_text_processing/text_normalization/") +# sys.path.insert(0, '/nemo_text_processing/text_normalization/normalize/') from nemo_text_processing.text_normalization.normalize import Normalizer from tqdm 
import tqdm From a1e2bec187e35234381a59be5f8f749b53e138d0 Mon Sep 17 00:00:00 2001 From: treacker Date: Wed, 26 Jan 2022 07:09:43 -0800 Subject: [PATCH 053/244] changed vits yaml --- examples/tts/conf/vits.yaml | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index 20146d0902ef..4f38c2bacc3c 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -1,13 +1,11 @@ name: "VITS" -labels: [' ', '!', "'", '(', ')', ',', '-', '.', ':', ';', '?', 'a', 'b', 'c', 'd', 'e', 'f', 'g', - 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'] +sample_rate: 22050 + train_dataset: ??? validation_datasets: ??? -test_datasets: null -sample_rate: 22050 -sup_data_path: null -sup_data_types: null +sup_data_path: ??? +sup_data_types: [ "duration_prior"] # LJSpeech stats (per frame), train pitch_mean: 212.35873413085938 @@ -30,8 +28,8 @@ pitch_loss_scale: 0.1 durs_loss_scale: 0.1 mel_loss_scale: 1.0 -phoneme_dict_path: null # "scripts/tts_dataset_files/cmudict-0.7b-030921" -heteronyms_path: null # "scripts/tts_dataset_files/heteronyms-030921" +phoneme_dict_path: "scripts/tts_dataset_files/cmudict-0.7b-030921" +heteronyms_path: "scripts/tts_dataset_files/heteronyms-030921" filter_channels: 768 filter_length: 1024 @@ -73,7 +71,7 @@ model: train_ds: dataset: - _target_: "nemo.collections.tts.torch.data.MixerTTSDataset" + _target_: "nemo.collections.tts.torch.data.TTSDataset" manifest_filepath: ${train_dataset} sample_rate: ${sample_rate} sup_data_path: ${sup_data_path} @@ -116,7 +114,7 @@ model: validation_ds: dataset: - _target_: "nemo.collections.tts.torch.data.MixerTTSDataset" + _target_: "nemo.collections.tts.torch.data.TTSDataset" manifest_filepath: ${validation_datasets} sample_rate: ${sample_rate} sup_data_path: ${sup_data_path} From 1f5b3674786cb4fe766bd7c569b3202ef316addd Mon Sep 17 00:00:00 2001 From: treacker Date: Wed, 26 Jan 2022 09:12:18 -0800 Subject: [PATCH 054/244] rm yaml --- examples/tts/conf/vits.yaml | 265 ++++++++++++++++++++++++++++++++++++ 1 file changed, 265 insertions(+) create mode 100644 examples/tts/conf/vits.yaml diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml new file mode 100644 index 000000000000..057aa4c6cdd7 --- /dev/null +++ b/examples/tts/conf/vits.yaml @@ -0,0 +1,265 @@ +# This config contains the default values for training Mixer-TTS model on LJSpeech dataset. +# If you want to train model on other dataset, you can change config values according to your dataset. +# Most dataset-specific arguments are in the head of the config file, see below. + +name: "VITS" + +train_dataset: ??? +validation_datasets: ??? +sup_data_path: ??? 
+sup_data_types: [ "duration_prior"] + +# Default values from librosa.pyin +pitch_fmin: 65.40639132514966 +pitch_fmax: 2093.004522404789 + +# LJSpeech stats (per frame), train +pitch_mean: 212.35873413085938 +pitch_std: 68.52806091308594 + +# default values for sample_rate=22050 +sample_rate: 22050 +n_mels: 80 +n_window_size: 1024 +n_window_stride: 256 +n_fft: 1024 +lowfreq: 0 +highfreq: null +window: "hann" + +pitch_loss_scale: 0.1 +durs_loss_scale: 0.1 +mel_loss_scale: 1.0 + +phoneme_dict_path: "scripts/tts_dataset_files/cmudict-0.7b-030921" +heteronyms_path: "scripts/tts_dataset_files/heteronyms-030921" +whitelist_path: "nemo_text_processing/text_normalization/en/data/whitelist_lj_speech.tsv" + +filter_channels: 768 +filter_length: 1024 + +model: + sample_rate: 22050 + splice_length: 64 + lr: 2e-4 + labels: ${labels} + n_speakers: 1 + symbols_embedding_dim: 384 + max_token_duration: 75 + n_mel_channels: ${n_mels} + pitch_embedding_kernel_size: 3 + mel_loss_coeff: 40 + hop_size: 256 + log_interval: 200 + eval_interval: 1000 + seed: 1234 + betas: [0.8,0.99] + eps: 1e-9 + lr_decay: 0.999875 + segment_size: 8192 + init_lr_ratio: 1 + warmup_epochs: 0 + c_mel: 45 + c_kl: 1. + inter_channels: 192 + hidden_channels: 192 + filter_channels: ${filter_channels} + filter_length: ${filter_length} + n_heads: 2 + p_dropout: 0.1 + n_layers_q: 3 + n_layers: 6 + use_spectral_norm: false + mel_fmin: 0.0 + mel_fmax: null + + text_normalizer: + _target_: nemo_text_processing.text_normalization.normalize.Normalizer + lang: en + input_case: cased + whitelist: ${whitelist_path} + + text_normalizer_call_kwargs: + verbose: false + punct_pre_process: true + punct_post_process: true + + text_tokenizer: + _target_: nemo.collections.tts.torch.tts_tokenizers.EnglishPhonemesTokenizer + punct: true + stresses: true + chars: true + apostrophe: true + pad_with_space: true + g2p: + _target_: nemo.collections.tts.torch.g2ps.EnglishG2p + phoneme_dict: ${phoneme_dict_path} + heteronyms: ${heteronyms_path} + + train_ds: + dataset: + _target_: "nemo.collections.tts.torch.data.TTSDataset" + manifest_filepath: ${train_dataset} + sample_rate: ${sample_rate} + sup_data_path: ${sup_data_path} + sup_data_types: ${sup_data_types} + n_fft: ${n_fft} + win_length: ${n_window_size} + hop_length: ${n_window_stride} + window: ${window} + n_mels: ${n_mels} + lowfreq: ${lowfreq} + highfreq: ${highfreq} + max_duration: null + min_duration: 0.1 + ignore_file: null + trim: False + pitch_fmin: ${pitch_fmin} + pitch_fmax: ${pitch_fmax} + + + dataloader_params: + drop_last: false + shuffle: true + batch_size: 16 + num_workers: 4 + pin_memory: false + + validation_ds: + dataset: + _target_: "nemo.collections.tts.torch.data.TTSDataset" + manifest_filepath: ${validation_datasets} + sample_rate: ${sample_rate} + sup_data_path: ${sup_data_path} + sup_data_types: ${sup_data_types} + n_fft: ${n_fft} + win_length: ${n_window_size} + hop_length: ${n_window_stride} + window: ${window} + n_mels: ${n_mels} + lowfreq: ${lowfreq} + highfreq: ${highfreq} + max_duration: null + min_duration: 0.1 + ignore_file: null + trim: False + pitch_fmin: ${pitch_fmin} + pitch_fmax: ${pitch_fmax} + + + dataloader_params: + drop_last: false + shuffle: true + batch_size: 16 + num_workers: 1 + pin_memory: false + + + preprocessor: + _target_: nemo.collections.asr.parts.preprocessing.features.FilterbankFeatures + dither: 0.0 + nfilt: ${model.n_mel_channels} + frame_splicing: 1 + highfreq: 8000 + log: true + log_zero_guard_type: clamp + log_zero_guard_value: 1e-05 + lowfreq: 0 + 
mag_power: 1.0 + n_fft: 1024 + n_window_size: ${n_window_size} + n_window_stride: ${model.hop_size} + normalize: null + pad_to: 1 + pad_value: 0 + preemph: null + stft_conv: false + nb_augmentation_prob : 0 + sample_rate: ${model.sample_rate} + window: hann + exact_pad: true + use_grads: false + + input_fft: + _target_: nemo.collections.tts.modules.transformer.FFTransformerEncoder + n_layer: 6 + n_head: 1 + d_model: ${model.symbols_embedding_dim} + d_head: 64 + d_inner: 1536 + kernel_size: 3 + dropout: 0.1 + dropatt: 0.1 + dropemb: 0.0 + n_embed: 148 # NOTE Should match # of tokens in `symbol_set` + d_embed: ${model.symbols_embedding_dim} + padding_idx: 0 + + output_fft: + _target_: nemo.collections.tts.modules.transformer.FFTransformerDecoder + n_layer: 6 + n_head: 1 + d_model: ${model.symbols_embedding_dim} + d_head: 64 + d_inner: 1536 + kernel_size: 3 + dropout: 0.1 + dropatt: 0.1 + dropemb: 0.0 + + duration_predictor: + _target_: nemo.collections.tts.modules.vits_modules.StochasticDurationPredictor + in_channels: ${model.symbols_embedding_dim} # input_size: ${model.symbols_embedding_dim} + kernel_size: 3 + filter_channels: ${filter_length} # filter_size: 256 + p_dropout: 0.1 # dropout: 0.1 + # n_layers: 6 + + pitch_predictor: + _target_: nemo.collections.tts.modules.fastpitch.TemporalPredictor + input_size: ${model.symbols_embedding_dim} + kernel_size: 3 + filter_size: ${filter_length} + dropout: 0.1 + n_layers: ${model.n_layers} + + generator: + _target_: nemo.collections.tts.modules.vits_modules.Generator + resblock: "1" + resblock_kernel_sizes: [3,7,11] + resblock_dilation_sizes: [[1,3,5], [1,3,5], [1,3,5]] + upsample_rates: [8,8,2,2] + upsample_initial_channel: 512 + upsample_kernel_sizes: [16,16,4,4] + initial_channel: 384 # initial_input_size: 384 + +trainer: + num_nodes: 1 + devices: 1 + accelerator: gpu + strategy: ddp + precision: 16 + max_epochs: 1000 + accumulate_grad_batches: 1 + # gradient_clip_val: 1000.0 + checkpoint_callback: false # Provided by exp_manager + logger: false # Provided by exp_manager + log_every_n_steps: 100 + flush_logs_every_n_steps: 1000 + check_val_every_n_epoch: 5 + +exp_manager: + exp_dir: null + name: ${name} + create_tensorboard_logger: true + create_checkpoint_callback: true + checkpoint_callback_params: + monitor: val_mel_loss + mode: min + create_wandb_logger: false + wandb_logger_kwargs: + name: null + project: null + entity: null + resume_if_exists: false + resume_ignore_no_checkpoint: false \ No newline at end of file From 34f34295e5badeba181f8a0a90428f1b2adc7472 Mon Sep 17 00:00:00 2001 From: treacker Date: Thu, 27 Jan 2022 01:28:07 -0800 Subject: [PATCH 055/244] fix yaml and model --- examples/tts/conf/mixer-tts.yaml | 6 +- examples/tts/conf/vits.yaml | 105 +++------------------------- examples/tts/vits.py | 4 +- nemo/collections/tts/models/vits.py | 69 +++++++++++++++--- nemo/collections/tts/torch/data.py | 81 --------------------- 5 files changed, 75 insertions(+), 190 deletions(-) diff --git a/examples/tts/conf/mixer-tts.yaml b/examples/tts/conf/mixer-tts.yaml index 945eebf3efda..3c523be87874 100644 --- a/examples/tts/conf/mixer-tts.yaml +++ b/examples/tts/conf/mixer-tts.yaml @@ -4,9 +4,9 @@ name: Mixer-TTS -train_dataset: ??? -validation_datasets: ??? -sup_data_path: ??? 
+train_dataset: "ljspeech_ds/LJSpeech-1.1/train_manifest.json" +validation_datasets: "ljspeech_ds/LJSpeech-1.1/val_manifest.json" +sup_data_path: "ljspeech_ds/LJSpeech-1.1/" sup_data_types: [ "align_prior_matrix", "pitch" ] # Default values from librosa.pyin diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index 1208df56b120..979d0cd7b815 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -1,60 +1,42 @@ -<<<<<<< HEAD # This config contains the default values for training Mixer-TTS model on LJSpeech dataset. # If you want to train model on other dataset, you can change config values according to your dataset. # Most dataset-specific arguments are in the head of the config file, see below. -name: "VITS" +name: VITS -======= -name: "VITS" +labels: [' ', '!', "'", '(', ')', ',', '-', '.', ':', ';', '?', 'a', 'b', 'c', 'd', 'e', 'f', 'g', + 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'] -sample_rate: 22050 - ->>>>>>> vits -train_dataset: ??? -validation_datasets: ??? -sup_data_path: ??? -sup_data_types: [ "duration_prior"] +train_dataset: "ljspeech_ds/LJSpeech-1.1/train_manifest.json" +validation_datasets: "ljspeech_ds/LJSpeech-1.1/val_manifest.json" +sup_data_path: null +sup_data_types: null -<<<<<<< HEAD # Default values from librosa.pyin pitch_fmin: 65.40639132514966 pitch_fmax: 2093.004522404789 -======= ->>>>>>> vits # LJSpeech stats (per frame), train pitch_mean: 212.35873413085938 pitch_std: 68.52806091308594 -<<<<<<< HEAD # default values for sample_rate=22050 sample_rate: 22050 -======= -# default values from librosa.pyin -pitch_fmin: 65.40639132514966 -pitch_fmax: 2093.004522404789 - -# default values for sample_rate=22050 ->>>>>>> vits n_mels: 80 n_window_size: 1024 n_window_stride: 256 n_fft: 1024 lowfreq: 0 -highfreq: null -window: "hann" +highfreq: 8000 +window: hann pitch_loss_scale: 0.1 durs_loss_scale: 0.1 mel_loss_scale: 1.0 -phoneme_dict_path: "scripts/tts_dataset_files/cmudict-0.7b-030921" +phoneme_dict_path: "scripts/tts_dataset_files/cmudict-0.7b_nv22.01" heteronyms_path: "scripts/tts_dataset_files/heteronyms-030921" -<<<<<<< HEAD whitelist_path: "nemo_text_processing/text_normalization/en/data/whitelist_lj_speech.tsv" -======= ->>>>>>> vits filter_channels: 768 filter_length: 1024 @@ -94,7 +76,6 @@ model: mel_fmin: 0.0 mel_fmax: null -<<<<<<< HEAD text_normalizer: _target_: nemo_text_processing.text_normalization.normalize.Normalizer lang: en @@ -118,8 +99,6 @@ model: phoneme_dict: ${phoneme_dict_path} heteronyms: ${heteronyms_path} -======= ->>>>>>> vits train_ds: dataset: _target_: "nemo.collections.tts.torch.data.TTSDataset" @@ -141,33 +120,11 @@ model: pitch_fmin: ${pitch_fmin} pitch_fmax: ${pitch_fmax} -<<<<<<< HEAD dataloader_params: drop_last: false shuffle: true batch_size: 16 -======= - text_tokenizer: - _target_: "nemo.collections.tts.torch.tts_tokenizers.EnglishPhonemesTokenizer" - punct: True - stresses: True - chars: True - space: ' ' - silence: null - apostrophe: True - sep: '|' - add_blank_at: null - pad_with_space: True - g2p: - _target_: "nemo.collections.tts.torch.g2ps.EnglishG2p" - phoneme_dict: ${phoneme_dict_path} - heteronyms: ${heteronyms_path} - dataloader_params: - drop_last: false - shuffle: true - batch_size: 2 ->>>>>>> vits num_workers: 4 pin_memory: false @@ -192,33 +149,11 @@ model: pitch_fmin: ${pitch_fmin} pitch_fmax: ${pitch_fmax} -<<<<<<< HEAD -======= - text_tokenizer: - _target_: 
"nemo.collections.tts.torch.tts_tokenizers.EnglishPhonemesTokenizer" - punct: True - stresses: True - chars: True - space: ' ' - silence: null - apostrophe: True - sep: '|' - add_blank_at: null - pad_with_space: True - g2p: - _target_: "nemo.collections.tts.torch.g2ps.EnglishG2p" - phoneme_dict: ${phoneme_dict_path} - heteronyms: ${heteronyms_path} ->>>>>>> vits dataloader_params: drop_last: false shuffle: true -<<<<<<< HEAD batch_size: 16 -======= - batch_size: 2 ->>>>>>> vits num_workers: 1 pin_memory: false @@ -302,9 +237,8 @@ model: initial_channel: 384 # initial_input_size: 384 trainer: -<<<<<<< HEAD num_nodes: 1 - devices: 1 + devices: 2 accelerator: gpu strategy: ddp precision: 16 @@ -315,24 +249,11 @@ trainer: logger: false # Provided by exp_manager log_every_n_steps: 100 flush_logs_every_n_steps: 1000 -======= - gpus: -1 # number of gpus - max_epochs: 20000 - num_nodes: 1 - accelerator: ddp - accumulate_grad_batches: 1 - checkpoint_callback: False # Provided by exp_manager - logger: False # Provided by exp_manager - # gradient_clip_val: 1000.0 - flush_logs_every_n_steps: 1000 - log_every_n_steps: 100 ->>>>>>> vits check_val_every_n_epoch: 5 exp_manager: exp_dir: null name: ${name} -<<<<<<< HEAD create_tensorboard_logger: true create_checkpoint_callback: true checkpoint_callback_params: @@ -345,7 +266,3 @@ exp_manager: entity: null resume_if_exists: false resume_ignore_no_checkpoint: false -======= - create_tensorboard_logger: True - create_checkpoint_callback: True ->>>>>>> vits diff --git a/examples/tts/vits.py b/examples/tts/vits.py index 24219dbd97d5..66d29aa9ba35 100644 --- a/examples/tts/vits.py +++ b/examples/tts/vits.py @@ -25,9 +25,7 @@ def main(cfg): trainer = pl.Trainer(**cfg.trainer) exp_manager(trainer, cfg.get("exp_manager", None)) model = VitsModel(cfg=cfg.model, trainer=trainer) - lr_logger = pl.callbacks.LearningRateMonitor() - epoch_time_logger = LogEpochTimeCallback() - trainer.callbacks.extend([lr_logger, epoch_time_logger]) + trainer.callbacks.extend([pl.callbacks.LearningRateMonitor(), LogEpochTimeCallback()]) trainer.fit(model) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 684d41d53a37..73325a4e1fac 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -11,9 +11,9 @@ from nemo.collections.tts.helpers.helpers import plot_spectrogram_to_numpy from nemo.collections.tts.losses.vits_losses import DiscriminatorLoss, FeatureLoss, GeneratorLoss, KlLoss from nemo.collections.tts.models.base import TextToWaveform -from nemo.collections.tts.modules.vits_modules import SynthesizerTrn, MultiPeriodDiscriminator, spec_to_mel_torch, slice_segments, clip_grad_value_ +from nemo.collections.tts.modules.vits_modules import SynthesizerTrn, MultiPeriodDiscriminator, audio_to_mel_torch, spec_to_mel_torch, slice_segments, clip_grad_value_ from nemo.core.classes.common import PretrainedModelInfo -from nemo.utils import logging +from nemo.utils import logging, model_utils @dataclass @@ -29,8 +29,24 @@ class VitsConfig: class VitsModel(TextToWaveform): def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): - if isinstance(cfg, dict): - cfg = OmegaConf.create(cfg) + # Convert to Hydra 1.0 compatible DictConfig + cfg = model_utils.convert_model_config_to_dict_config(cfg) + cfg = model_utils.maybe_update_config_version(cfg) + + # setup normalizer + self.normalizer = None + self.text_normalizer_call = None + self.text_normalizer_call_kwargs = {} + self._setup_normalizer(cfg) + + # setup 
tokenizer + self.tokenizer = None + self._setup_tokenizer(cfg) + assert self.tokenizer is not None + + num_tokens = len(self.tokenizer.tokens) + self.tokenizer_pad = self.tokenizer.pad + self.tokenizer_unk = self.tokenizer.oov super().__init__(cfg=cfg, trainer=trainer) @@ -117,6 +133,38 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): win_length=self.win_length, window=window_fn(self.win_length, periodic=False).to(torch.float) if window_fn else None, ) + def _setup_normalizer(self, cfg): + if "text_normalizer" in cfg: + normalizer_kwargs = {} + + if "whitelist" in cfg.text_normalizer: + normalizer_kwargs["whitelist"] = self.register_artifact( + 'text_normalizer.whitelist', cfg.text_normalizer.whitelist + ) + + self.normalizer = instantiate(cfg.text_normalizer, **normalizer_kwargs) + self.text_normalizer_call = self.normalizer.normalize + if "text_normalizer_call_kwargs" in cfg: + self.text_normalizer_call_kwargs = cfg.text_normalizer_call_kwargs + + def _setup_tokenizer(self, cfg): + text_tokenizer_kwargs = {} + if "g2p" in cfg.text_tokenizer: + g2p_kwargs = {} + + if "phoneme_dict" in cfg.text_tokenizer.g2p: + g2p_kwargs["phoneme_dict"] = self.register_artifact( + 'text_tokenizer.g2p.phoneme_dict', cfg.text_tokenizer.g2p.phoneme_dict, + ) + + if "heteronyms" in cfg.text_tokenizer.g2p: + g2p_kwargs["heteronyms"] = self.register_artifact( + 'text_tokenizer.g2p.heteronyms', cfg.text_tokenizer.g2p.heteronyms, + ) + + text_tokenizer_kwargs["g2p"] = instantiate(cfg.text_tokenizer.g2p, **g2p_kwargs) + + self.tokenizer = instantiate(cfg.text_tokenizer, **text_tokenizer_kwargs) def parse(self, str_input: str) -> torch.tensor: # TODO: Implement @@ -187,7 +235,7 @@ def training_step(self, batch, batch_idx): ) y_mel = slice_segments(mel, ids_slice, self._cfg.segment_size // self._cfg.hop_size) - y_hat_mel = modules.audio_to_mel_torch( + y_hat_mel = audio_to_mel_torch( y_hat.squeeze(1), self._cfg.filter_length, self._cfg.n_mel_channels, @@ -293,8 +341,7 @@ def validation_step(self, batch, batch_idx): dataformats="HWC", ) - @staticmethod - def _loader(cfg): + def _loader(self, cfg): try: # _ = cfg.model.train_ds.manifest_filepath _ = cfg['dataset']['manifest_filepath'] @@ -302,12 +349,16 @@ def _loader(cfg): logging.warning("manifest_filepath was skipped. 
No dataset for this model.") return None - dataset = instantiate(cfg.dataset) + dataset = instantiate( + cfg.dataset, + text_normalizer=self.normalizer, + text_normalizer_call_kwargs=self.text_normalizer_call_kwargs, + text_tokenizer=self.tokenizer + ) return torch.utils.data.DataLoader( # noqa dataset=dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params, ) - def setup_training_data(self, cfg): self._train_dl = self._loader(cfg) diff --git a/nemo/collections/tts/torch/data.py b/nemo/collections/tts/torch/data.py index a3f5c1cf45ab..349867e4a143 100644 --- a/nemo/collections/tts/torch/data.py +++ b/nemo/collections/tts/torch/data.py @@ -377,10 +377,7 @@ def get_log_mel(self, audio): def __getitem__(self, index): sample = self.data[index] -<<<<<<< HEAD audio_path_as_text_id = sample["audio_filepath"].replace("/", "-").split(".")[0] -======= ->>>>>>> main # Let's keep audio name and all internal directories in rel_audio_path_as_text_id to avoid any collisions rel_audio_path = Path(sample["audio_filepath"]).relative_to(self.base_data_dir).with_suffix("") @@ -402,14 +399,10 @@ def __getitem__(self, index): if mel_path is not None and Path(mel_path).exists(): log_mel = torch.load(mel_path) else: -<<<<<<< HEAD mel_folder = Path(self.sup_data_path) / "mel" mel_folder.mkdir(exist_ok=True, parents=True) mel_path = mel_folder / f"mel{audio_path_as_text_id}.pt" -======= - mel_path = self.log_mel_folder / f"{rel_audio_path_as_text_id}.pt" ->>>>>>> main if mel_path.exists(): log_mel = torch.load(mel_path) @@ -432,14 +425,10 @@ def __getitem__(self, index): mel_len = self.get_log_mel(audio).shape[2] align_prior_matrix = torch.from_numpy(self.beta_binomial_interpolator(mel_len, text_length.item())) else: -<<<<<<< HEAD prior_folder = Path(self.sup_data_path) / "align_prior_matrix" prior_folder.mkdir(exist_ok=True, parents=True) prior_path = prior_folder / f"prior{audio_path_as_text_id}.pt" -======= - prior_path = self.align_prior_matrix_folder / f"{rel_audio_path_as_text_id}.pt" ->>>>>>> main if prior_path.exists(): align_prior_matrix = torch.load(prior_path) @@ -452,14 +441,10 @@ def __getitem__(self, index): # Load pitch if needed pitch, pitch_length = None, None if Pitch in self.sup_data_types_set: -<<<<<<< HEAD pitch_folder = Path(self.sup_data_path) / "pitch" pitch_folder.mkdir(exist_ok=True, parents=True) pitch_path = pitch_folder / f"pitch{audio_path_as_text_id}.pt" -======= - pitch_path = self.pitch_folder / f"{rel_audio_path_as_text_id}.pt" ->>>>>>> main if pitch_path.exists(): pitch = torch.load(pitch_path).float() @@ -485,14 +470,10 @@ def __getitem__(self, index): # Load energy if needed energy, energy_length = None, None if Energy in self.sup_data_types_set: -<<<<<<< HEAD energy_folder = Path(self.sup_data_path) / "energy" energy_folder.mkdir(exist_ok=True, parents=True) energy_path = energy_folder / f"energy{audio_path_as_text_id}.pt" -======= - energy_path = self.energy_folder / f"{rel_audio_path_as_text_id}.pt" ->>>>>>> main if energy_path.exists(): energy = torch.load(energy_path).float() @@ -734,57 +715,19 @@ def _collate_fn(self, batch): class VocoderDataset(Dataset): def __init__( self, -<<<<<<< HEAD manifest_filepath: str, sample_rate: int, n_segments: Optional[int] = None, min_duration: Optional[float] = None, max_duration: Optional[float] = None, ignore_file: Optional[str] = None, -======= - manifest_filepath: Union[str, Path, List[str], List[Path]], - sample_rate: int, - n_segments: Optional[int] = None, - max_duration: Optional[float] = None, - min_duration: 
Optional[float] = None,
-        ignore_file: Optional[Union[str, Path]] = None,
->>>>>>> main
         trim: Optional[bool] = False,
         load_precomputed_mel: bool = False,
         hop_length: Optional[int] = None,
     ):
-<<<<<<< HEAD
         if isinstance(manifest_filepath, str):
             manifest_filepath = [manifest_filepath]
         self.manifest_filepath = manifest_filepath
-=======
-        """Dataset which can be used for training and fine-tuning a vocoder with pre-computed mel-spectrograms.
-        Args:
-            manifest_filepath (Union[str, Path, List[str], List[Path]]): Path(s) to the .json manifests containing information on the
-                dataset. Each line in the .json file should be valid json. Note: the .json file itself is not valid
-                json. Each line should contain the following:
-                "audio_filepath": <PATH_TO_WAV>,
-                "duration": <Duration of audio clip in seconds> (Optional),
-                "mel_filepath": <PATH_TO_LOG_MEL_PT> (Optional)
-            sample_rate (int): The sample rate of the audio, or the sample rate that all files will be resampled to.
-            n_segments (int): The length of audio in samples to load. For example, given a sample rate of 16kHz, and
-                n_segments=16000, a random 1 second section of audio from the clip will be loaded. The section will
-                be randomly sampled every time the audio is batched. Can be set to None to load the entire audio.
-                Must be specified if load_precomputed_mel is True.
-            max_duration (Optional[float]): Max duration of audio clips in seconds. All samples exceeding this will be
-                pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load
-                audio to compute duration. Defaults to None which does not prune.
-            min_duration (Optional[float]): Min duration of audio clips in seconds. All samples lower than this will be
-                pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load
-                audio to compute duration. Defaults to None which does not prune.
-            ignore_file (Optional[Union[str, Path]]): The location of a pickle-saved list of audio paths
-                that will be pruned prior to training. Defaults to None which does not prune.
-            trim (bool): Whether to apply librosa.effects.trim to the audio file. Defaults to False.
-            load_precomputed_mel (bool): Whether to load precomputed mel (useful for fine-tuning). Note: Requires "mel_filepath" to be set in the manifest file.
-            hop_length (Optional[int]): The hop length between fft computations. Must be specified if load_precomputed_mel is True.
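            Example manifest line (one JSON object per line; the filenames and duration are
            made-up illustrations, and "mel_filepath" is only needed when load_precomputed_mel=True):
                {"audio_filepath": "wavs/LJ001-0001.wav", "duration": 9.65, "mel_filepath": "mels/LJ001-0001.pt"}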
- """ - super().__init__() ->>>>>>> main if load_precomputed_mel: if hop_length is None: @@ -793,17 +736,8 @@ def __init__( if n_segments is None: raise ValueError("n_segments must be specified when load_precomputed_mel is True") -<<<<<<< HEAD self.data = [] audio_files = [] -======= - # Initialize and read manifest file(s), filter out data by duration and ignore_file - if isinstance(manifest_filepath, str): - manifest_filepath = [manifest_filepath] - self.manifest_filepath = manifest_filepath - - data = [] ->>>>>>> main total_duration = 0 for manifest_file in self.manifest_filepath: with open(Path(manifest_file).expanduser(), 'r') as f: @@ -820,11 +754,7 @@ def __init__( "duration": item["duration"] if "duration" in item else None, } -<<<<<<< HEAD audio_files.append(file_info) -======= - data.append(file_info) ->>>>>>> main if file_info["duration"] is None: logging.info( @@ -835,7 +765,6 @@ def __init__( if total_duration is not None: total_duration += item["duration"] -<<<<<<< HEAD logging.info(f"Loaded dataset with {len(audio_files)} files.") if total_duration is not None: logging.info(f"Dataset contains {total_duration / 3600:.2f} hours.") @@ -875,16 +804,6 @@ def __init__( f"{(total_duration - pruned_duration) / 3600:.2f} hours." ) -======= - logging.info(f"Loaded dataset with {len(data)} files.") - if total_duration is not None: - logging.info(f"Dataset contains {total_duration / 3600:.2f} hours.") - - self.data = TTSDataset.filter_files(data, ignore_file, min_duration, max_duration, total_duration) - self.base_data_dir = get_base_dir([item["audio_filepath"] for item in self.data]) - - # Initialize audio and mel related parameters ->>>>>>> main self.load_precomputed_mel = load_precomputed_mel self.featurizer = WaveformFeaturizer(sample_rate=sample_rate) self.sample_rate = sample_rate From 33d21d38a02a65d0291770d92be8af5aed1428eb Mon Sep 17 00:00:00 2001 From: treacker Date: Fri, 28 Jan 2022 08:19:28 -0800 Subject: [PATCH 056/244] Added scaler --- examples/tts/vits.py | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/examples/tts/vits.py b/examples/tts/vits.py index 66d29aa9ba35..ac4441af9755 100644 --- a/examples/tts/vits.py +++ b/examples/tts/vits.py @@ -13,6 +13,9 @@ # limitations under the License. 
import pytorch_lightning as pl +from pytorch_lightning.plugins.precision.native_amp import NativeMixedPrecisionPlugin + +from torch.cuda.amp import GradScaler from nemo.collections.common.callbacks import LogEpochTimeCallback from nemo.collections.tts.models.vits import VitsModel @@ -22,7 +25,19 @@ @hydra_runner(config_path="conf", config_name="vits") def main(cfg): - trainer = pl.Trainer(**cfg.trainer) + plugins = [] + if cfg.trainer.precision in [16, 'bf16']: + scaler = GradScaler(enabled=True) + # if cfg.trainer.precision == 16: + # scaler = GradScaler( + # init_scale=cfg.model.get('native_amp_init_scale', 2 ** 32), + # growth_interval=cfg.model.get('native_amp_growth_interval', 1000), + # ) + + plugins.append(NativeMixedPrecisionPlugin(precision=cfg.trainer.precision, device='cuda', scaler=scaler)) + + + trainer = pl.Trainer(plugins=plugins, **cfg.trainer) exp_manager(trainer, cfg.get("exp_manager", None)) model = VitsModel(cfg=cfg.model, trainer=trainer) trainer.callbacks.extend([pl.callbacks.LearningRateMonitor(), LogEpochTimeCallback()]) From 845257eeb3e09bfe98c9f8f94e54ad0dd92a88fd Mon Sep 17 00:00:00 2001 From: treacker Date: Fri, 28 Jan 2022 08:19:58 -0800 Subject: [PATCH 057/244] refactored yaml --- examples/tts/conf/vits.yaml | 106 +++++++++++++++++++----------------- 1 file changed, 57 insertions(+), 49 deletions(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index 979d0cd7b815..e9da99edfaaa 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -4,9 +4,6 @@ name: VITS -labels: [' ', '!', "'", '(', ')', ',', '-', '.', ':', ';', '?', 'a', 'b', 'c', 'd', 'e', 'f', 'g', - 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'] - train_dataset: "ljspeech_ds/LJSpeech-1.1/train_manifest.json" validation_datasets: "ljspeech_ds/LJSpeech-1.1/val_manifest.json" sup_data_path: null @@ -22,7 +19,7 @@ pitch_std: 68.52806091308594 # default values for sample_rate=22050 sample_rate: 22050 -n_mels: 80 +n_mel_channels: 80 n_window_size: 1024 n_window_stride: 256 n_fft: 1024 @@ -42,19 +39,26 @@ filter_channels: 768 filter_length: 1024 model: - sample_rate: 22050 + pitch_fmin: ${pitch_fmin} + pitch_fmax: ${pitch_fmax} + + sample_rate: ${sample_rate} + n_mel_channels: ${n_mel_channels} + n_window_size: ${n_window_size} + n_window_stride: ${n_window_stride} + n_fft: ${n_fft} + lowfreq: ${lowfreq} + highfreq: ${highfreq} + window: ${window} + + splice_length: 64 lr: 2e-4 - labels: ${labels} n_speakers: 1 symbols_embedding_dim: 384 max_token_duration: 75 - n_mel_channels: ${n_mels} pitch_embedding_kernel_size: 3 - mel_loss_coeff: 40 - hop_size: 256 - log_interval: 200 - eval_interval: 1000 + seed: 1234 betas: [0.8,0.99] eps: 1e-9 @@ -64,6 +68,7 @@ model: warmup_epochs: 0 c_mel: 45 c_kl: 1. 
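  # The two loss weights above scale the generator's auxiliary objectives: c_mel multiplies
  # the L1 mel-reconstruction term and c_kl the KL-divergence term (logged from training_step
  # as "loss_mel * c_mel" and "loss_kl * c_kl"); 45 and 1.0 match the original VITS defaults.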
+ inter_channels: 192 hidden_channels: 192 filter_channels: ${filter_channels} @@ -103,22 +108,22 @@ model: dataset: _target_: "nemo.collections.tts.torch.data.TTSDataset" manifest_filepath: ${train_dataset} - sample_rate: ${sample_rate} + sample_rate: ${model.sample_rate} sup_data_path: ${sup_data_path} sup_data_types: ${sup_data_types} - n_fft: ${n_fft} - win_length: ${n_window_size} - hop_length: ${n_window_stride} - window: ${window} - n_mels: ${n_mels} - lowfreq: ${lowfreq} - highfreq: ${highfreq} + n_fft: ${model.n_fft} + win_length: ${model.n_window_size} + hop_length: ${model.n_window_stride} + window: ${model.window} + n_mels: ${model.n_mel_channels} + lowfreq: ${model.lowfreq} + highfreq: ${model.highfreq} max_duration: null min_duration: 0.1 ignore_file: null trim: False - pitch_fmin: ${pitch_fmin} - pitch_fmax: ${pitch_fmax} + pitch_fmin: ${model.pitch_fmin} + pitch_fmax: ${model.pitch_fmax} dataloader_params: @@ -132,22 +137,22 @@ model: dataset: _target_: "nemo.collections.tts.torch.data.TTSDataset" manifest_filepath: ${validation_datasets} - sample_rate: ${sample_rate} + sample_rate: ${model.sample_rate} sup_data_path: ${sup_data_path} sup_data_types: ${sup_data_types} - n_fft: ${n_fft} - win_length: ${n_window_size} - hop_length: ${n_window_stride} - window: ${window} - n_mels: ${n_mels} - lowfreq: ${lowfreq} - highfreq: ${highfreq} + n_fft: ${model.n_fft} + win_length: ${model.n_window_size} + hop_length: ${model.n_window_stride} + window: ${model.window} + n_mels: ${model.n_mel_channels} + lowfreq: ${model.lowfreq} + highfreq: ${model.highfreq} max_duration: null min_duration: 0.1 ignore_file: null trim: False - pitch_fmin: ${pitch_fmin} - pitch_fmax: ${pitch_fmax} + pitch_fmin: ${model.pitch_fmin} + pitch_fmax: ${model.pitch_fmax} dataloader_params: @@ -159,27 +164,30 @@ model: preprocessor: - _target_: nemo.collections.asr.parts.preprocessing.features.FilterbankFeatures - dither: 0.0 + _target_: nemo.collections.asr.parts.preprocessing.features.FilterbankFeatures #change to STFT + nfilt: ${model.n_mel_channels} - frame_splicing: 1 - highfreq: 8000 + highfreq: ${model.highfreq} log: true log_zero_guard_type: clamp log_zero_guard_value: 1e-05 - lowfreq: 0 - mag_power: 1.0 - n_fft: 1024 - n_window_size: ${n_window_size} - n_window_stride: ${model.hop_size} - normalize: null + lowfreq: ${model.lowfreq} + + n_fft: ${model.n_fft} + n_window_size: ${model.n_window_size} + n_window_stride: ${model.n_window_stride} pad_to: 1 pad_value: 0 + sample_rate: ${model.sample_rate} + window: ${model.window} + normalize: null preemph: null + dither: 0.0 + frame_splicing: 1 stft_conv: false nb_augmentation_prob : 0 - sample_rate: ${model.sample_rate} - window: hann + mag_power: 1.0 + exact_pad: true use_grads: false @@ -238,7 +246,7 @@ model: trainer: num_nodes: 1 - devices: 2 + devices: 4 accelerator: gpu strategy: ddp precision: 16 @@ -247,12 +255,12 @@ trainer: # gradient_clip_val: 1000.0 checkpoint_callback: false # Provided by exp_manager logger: false # Provided by exp_manager - log_every_n_steps: 100 + log_every_n_steps: 10 flush_logs_every_n_steps: 1000 check_val_every_n_epoch: 5 exp_manager: - exp_dir: null + exp_dir: first_run_vits name: ${name} create_tensorboard_logger: true create_checkpoint_callback: true @@ -261,8 +269,8 @@ exp_manager: mode: min create_wandb_logger: false wandb_logger_kwargs: - name: null - project: null - entity: null + name: ${name} + project: VITS + entity: treacker resume_if_exists: false resume_ignore_no_checkpoint: false From 
d6ff4c7dad38828e4d6032377590bb488902a18d Mon Sep 17 00:00:00 2001 From: treacker Date: Fri, 28 Jan 2022 08:22:43 -0800 Subject: [PATCH 058/244] managed to run in fp16 --- nemo/collections/tts/models/vits.py | 150 ++++++++++++++++++---------- 1 file changed, 97 insertions(+), 53 deletions(-) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 73325a4e1fac..ca7c6adce2ee 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -1,13 +1,18 @@ +from encodings import normalize_encoding import omegaconf import torch from dataclasses import dataclass from hydra.utils import instantiate from omegaconf import MISSING, DictConfig, OmegaConf +from pytorch_lightning.plugins.precision.native_amp import NativeMixedPrecisionPlugin from pytorch_lightning import Trainer +from pytorch_lightning.loggers import WandbLogger from torch.cuda.amp import autocast from torch.nn import functional as F from typing import Any, Dict +import wandb +from nemo.collections.nlp.parts.nlp_overrides import GradScaler from nemo.collections.tts.helpers.helpers import plot_spectrogram_to_numpy from nemo.collections.tts.losses.vits_losses import DiscriminatorLoss, FeatureLoss, GeneratorLoss, KlLoss from nemo.collections.tts.models.base import TextToWaveform @@ -15,6 +20,9 @@ from nemo.core.classes.common import PretrainedModelInfo from nemo.utils import logging, model_utils +# to call optimizer_step +def closure(): + return @dataclass class VitsConfig: @@ -47,6 +55,8 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): num_tokens = len(self.tokenizer.tokens) self.tokenizer_pad = self.tokenizer.pad self.tokenizer_unk = self.tokenizer.oov + + # self.scaler = trainer.precision_plugin.scaler super().__init__(cfg=cfg, trainer=trainer) @@ -58,7 +68,7 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): raise ValueError(f"cfg was type: {type(cfg)}. 
Expected either a dict or a DictConfig") # Ensure passed cfg is compliant with schema OmegaConf.merge(cfg, schema) - + self.audio_to_melspec_precessor = instantiate(cfg.preprocessor, highfreq=cfg.train_ds.dataset.highfreq) self.encoder = instantiate(cfg.input_fft) @@ -85,15 +95,12 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): self.register_buffer('pitch_mean', torch.zeros(1)) self.register_buffer('pitch_std', torch.zeros(1)) - self.mel_loss_coeff = cfg.mel_loss_coeff - self.log_train_images = False self.logged_real_samples = False self._tb_logger = None self.hann_window = None - self.splice_length = cfg.splice_length self.sample_rate = cfg.sample_rate - self.hop_size = cfg.hop_size + self.hop_size = cfg.n_window_stride self.n_fft = cfg.train_ds.dataset.n_fft self.win_length = cfg.train_ds.dataset.win_length @@ -133,6 +140,9 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): win_length=self.win_length, window=window_fn(self.win_length, periodic=False).to(torch.float) if window_fn else None, ) + + self.precision_plugin = self.trainer.accelerator.precision_plugin # to call optimizer_step + def _setup_normalizer(self, cfg): if "text_normalizer" in cfg: normalizer_kwargs = {} @@ -204,7 +214,7 @@ def forward(self, batch, batch_idx): y_hat, attn, mask, *_ = self.net_g.module.infer(x, x_lengths, max_len=1000) y_hat_lengths = mask.sum([1, 2]).long() * self._cfg.hop_size - + return y_hat, y_hat_lengths @@ -217,14 +227,17 @@ def get_spec(self, audio): return spec def training_step(self, batch, batch_idx): + + (y, y_lengths, x, x_lengths) = batch spec = self.get_spec(y) spec_lengths = self.audio_to_melspec_precessor.get_seq_len(y_lengths) - with autocast(enabled=False): + with autocast(enabled=True): y_hat, l_length, attn, ids_slice, x_mask, z_mask, \ (z, z_p, m_p, logs_p, m_q, logs_q) = self.net_g(x, x_lengths, spec, spec_lengths) + mel = spec_to_mel_torch( spec, self._cfg.filter_length, @@ -233,29 +246,44 @@ def training_step(self, batch, batch_idx): self._cfg.mel_fmin, self._cfg.mel_fmax ) - y_mel = slice_segments(mel, ids_slice, self._cfg.segment_size // self._cfg.hop_size) + y_mel = slice_segments(mel, ids_slice, self._cfg.segment_size // self.cfg.n_window_stride) + + y_hat = y_hat.float() + y_hat_mel = audio_to_mel_torch( + y_hat.squeeze(1), + self._cfg.filter_length, + self._cfg.n_mel_channels, + self._cfg.sample_rate, + self.cfg.n_window_stride, + self._cfg.preprocessor.n_window_size, + self._cfg.mel_fmin, + self._cfg.mel_fmax + ) - y_hat_mel = audio_to_mel_torch( - y_hat.squeeze(1), - self._cfg.filter_length, - self._cfg.n_mel_channels, - self._cfg.sample_rate, - self._cfg.hop_size, - self._cfg.preprocessor.n_window_size, - self._cfg.mel_fmin, - self._cfg.mel_fmax - ) - y = torch.unsqueeze(y, 1) - y = slice_segments(y, ids_slice * self._cfg.hop_size, self._cfg.segment_size) # slice - y_d_hat_r, y_d_hat_g, _, _ = self.net_d(y, y_hat.detach()) + + y = torch.unsqueeze(y, 1) + y = slice_segments(y, ids_slice * self.cfg.n_window_stride, self._cfg.segment_size) # slice + + y_d_hat_r, y_d_hat_g, _, _ = self.net_d(y, y_hat.detach()) + with autocast(enabled=False): loss_disc, losses_disc_r, losses_disc_g = self.disc_loss(y_d_hat_r, y_d_hat_g) loss_disc_all = loss_disc # train discriminator + # self.optim_d.zero_grad() + # self.scaler.scale(loss_disc_all).backward() + # self.scaler.unscale_(self.optim_d) + + # norm_d = clip_grad_value_(self.net_d.parameters(), None)#self.cfg.trainer.gradient_clip_val) + # self.scaler.update() + # self.scaler.step(self.optim_d) + 
self.optim_d.zero_grad() self.manual_backward(loss_disc_all) - clip_grad_value_(self.net_d.parameters(), None) - self.optim_d.step() + self.precision_plugin.optimizer_step(self, self.optim_d, 0, closure) # dunno why + norm_d = clip_grad_value_(self.net_d.parameters(), None) + + # print('grad_d', norm_d) with autocast(enabled=True): # Generator @@ -268,12 +296,24 @@ def training_step(self, batch, batch_idx): loss_gen, losses_gen = self.gen_loss(disc_outputs=y_d_hat_g) loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl + + # train generator + + + # self.optim_g.zero_grad() + # self.scaler.scale(loss_gen_all).backward() + # self.scaler.unscale_(self.optim_g) + # norm_g = clip_grad_value_(self.net_g.parameters(), None) # self.cfg.trainer.gradient_clip_val) + # self.scaler.update() + # self.scaler.step(self.optim_g) + self.optim_g.zero_grad() self.manual_backward(loss_gen_all) - clip_grad_value_(self.net_g.parameters(), None) - self.optim_d.step() + self.precision_plugin.optimizer_step(self, self.optim_g, 1, closure) # dunno why + norm_g = clip_grad_value_(self.net_g.parameters(), None) + # print('grad_g', norm_g) schedulers = self.lr_schedulers() if schedulers is not None: sch1, sch2 = schedulers @@ -288,6 +328,8 @@ def training_step(self, batch, batch_idx): "loss_kl * c_kl": loss_kl, "loss_gen_all": loss_gen_all, "loss_disc_all": loss_disc_all, + "grad_gen" : norm_g, + "grad_disc" : norm_d, } for i, v in enumerate(losses_gen): @@ -312,34 +354,36 @@ def validation_step(self, batch, batch_idx): y_hat_mel, y_hat_mel_lengths = self.audio_to_melspec_precessor(y_hat, y_hat_lengths) # plot audio once per epoch - if batch_idx == 0 and self.logger is not None and self.logger.experiment is not None: - self.logger.experiment.add_audio( - "val_wav_target", - y[0, : y_lengths[0]].data.cpu().numpy(), - self.global_step, - sample_rate=self.sample_rate, - ) - - self.logger.experiment.add_audio( - "val_wav_predicted", - y_hat[0, : y_hat_lengths[0]].data.cpu().numpy(), - self.global_step, - sample_rate=self.sample_rate, - ) - - self.logger.experiment.add_image( - "val_mel_target", - plot_spectrogram_to_numpy(mel[0, :, : mel_lengths[0]].cpu().numpy()), - self.global_step, - dataformats="HWC", - ) - - self.logger.experiment.add_image( - "val_mel_predicted", - plot_spectrogram_to_numpy(y_hat_mel[0, :, : y_hat_mel_lengths[0]].cpu().numpy()), - self.global_step, - dataformats="HWC", - ) + if batch_idx == 0 and self.logger is not None and isinstance(self.logger, WandbLogger): + specs = [] + audios = [] + + specs += [ + wandb.Image( + plot_spectrogram_to_numpy(mel[0, :, : mel_lengths[0]].cpu().numpy()), + caption=f"val_mel_target", + ), + wandb.Image( + plot_spectrogram_to_numpy(y_hat_mel[0, :, : y_hat_mel_lengths[0]].cpu().numpy()), + caption=f"val_mel_predicted", + ), + ] + + audios += [ + wandb.Audio( + y[0, : y_lengths[0]].data.cpu().numpy(), + caption=f"val_wav_target", + sample_rate=self.sample_rate, + ), + wandb.Audio( + y_hat[0, : y_hat_lengths[0]].data.cpu().numpy(), + caption=f"val_wav_predicted", + sample_rate=self.sample_rate, + ), + ] + + self.logger.experiment.log({"specs": specs, "audios": audios}) + def _loader(self, cfg): try: From 733e6b416968f63eb722c4756ffa9f8c256d9556 Mon Sep 17 00:00:00 2001 From: Oktai Tatanov Date: Sat, 29 Jan 2022 20:09:05 +0300 Subject: [PATCH 059/244] refactoring Signed-off-by: Oktai Tatanov --- examples/tts/conf/vits.yaml | 52 +------- examples/tts/vits.py | 7 -- nemo/collections/tts/losses/vits_losses.py | 1 + nemo/collections/tts/models/vits.py | 114 
+++++------------- .../tts/modules/vits_mel_processing.py | 77 ------------ nemo/collections/tts/modules/vits_modules.py | 14 ++- 6 files changed, 46 insertions(+), 219 deletions(-) delete mode 100644 nemo/collections/tts/modules/vits_mel_processing.py diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index e9da99edfaaa..e8662dbf8423 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -1,7 +1,9 @@ -# This config contains the default values for training Mixer-TTS model on LJSpeech dataset. +# This config contains the default values for training VITS model on LJSpeech dataset. # If you want to train model on other dataset, you can change config values according to your dataset. # Most dataset-specific arguments are in the head of the config file, see below. +# TODO: remove unnecessary arguments + name: VITS train_dataset: "ljspeech_ds/LJSpeech-1.1/train_manifest.json" @@ -51,7 +53,6 @@ model: highfreq: ${highfreq} window: ${window} - splice_length: 64 lr: 2e-4 n_speakers: 1 @@ -164,8 +165,8 @@ model: preprocessor: - _target_: nemo.collections.asr.parts.preprocessing.features.FilterbankFeatures #change to STFT - + # TODO: change to STFT + _target_: nemo.collections.asr.parts.preprocessing.features.FilterbankFeatures nfilt: ${model.n_mel_channels} highfreq: ${model.highfreq} log: true @@ -191,49 +192,6 @@ model: exact_pad: true use_grads: false - input_fft: - _target_: nemo.collections.tts.modules.transformer.FFTransformerEncoder - n_layer: 6 - n_head: 1 - d_model: ${model.symbols_embedding_dim} - d_head: 64 - d_inner: 1536 - kernel_size: 3 - dropout: 0.1 - dropatt: 0.1 - dropemb: 0.0 - n_embed: 148 # NOTE Should match # of tokens in `symbol_set` - d_embed: ${model.symbols_embedding_dim} - padding_idx: 0 - - output_fft: - _target_: nemo.collections.tts.modules.transformer.FFTransformerDecoder - n_layer: 6 - n_head: 1 - d_model: ${model.symbols_embedding_dim} - d_head: 64 - d_inner: 1536 - kernel_size: 3 - dropout: 0.1 - dropatt: 0.1 - dropemb: 0.0 - - duration_predictor: - _target_: nemo.collections.tts.modules.vits_modules.StochasticDurationPredictor - in_channels: ${model.symbols_embedding_dim} # input_size: ${model.symbols_embedding_dim} - kernel_size: 3 - filter_channels: ${filter_length} # filter_size: 256 - p_dropout: 0.1 # dropout: 0.1 - # n_layers: 6 - - pitch_predictor: - _target_: nemo.collections.tts.modules.fastpitch.TemporalPredictor - input_size: ${model.symbols_embedding_dim} - kernel_size: 3 - filter_size: ${filter_length} - dropout: 0.1 - n_layers: ${model.n_layers} - generator: _target_: nemo.collections.tts.modules.vits_modules.Generator resblock: "1" diff --git a/examples/tts/vits.py b/examples/tts/vits.py index ac4441af9755..196859b3652d 100644 --- a/examples/tts/vits.py +++ b/examples/tts/vits.py @@ -28,15 +28,8 @@ def main(cfg): plugins = [] if cfg.trainer.precision in [16, 'bf16']: scaler = GradScaler(enabled=True) - # if cfg.trainer.precision == 16: - # scaler = GradScaler( - # init_scale=cfg.model.get('native_amp_init_scale', 2 ** 32), - # growth_interval=cfg.model.get('native_amp_growth_interval', 1000), - # ) - plugins.append(NativeMixedPrecisionPlugin(precision=cfg.trainer.precision, device='cuda', scaler=scaler)) - trainer = pl.Trainer(plugins=plugins, **cfg.trainer) exp_manager(trainer, cfg.get("exp_manager", None)) model = VitsModel(cfg=cfg.model, trainer=trainer) diff --git a/nemo/collections/tts/losses/vits_losses.py b/nemo/collections/tts/losses/vits_losses.py index 10e1f7836aec..493373322244 100644 --- 
a/nemo/collections/tts/losses/vits_losses.py +++ b/nemo/collections/tts/losses/vits_losses.py @@ -4,6 +4,7 @@ from nemo.core.neural_types.elements import LossType, VoidType from nemo.core.neural_types.neural_type import NeuralType +# TODO: check if we can use the same losses from other modules class FeatureLoss(Loss): def input_types(self): diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index ca7c6adce2ee..23480d3b8c13 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -1,18 +1,13 @@ -from encodings import normalize_encoding import omegaconf import torch -from dataclasses import dataclass from hydra.utils import instantiate -from omegaconf import MISSING, DictConfig, OmegaConf -from pytorch_lightning.plugins.precision.native_amp import NativeMixedPrecisionPlugin +from omegaconf import DictConfig from pytorch_lightning import Trainer from pytorch_lightning.loggers import WandbLogger from torch.cuda.amp import autocast from torch.nn import functional as F -from typing import Any, Dict import wandb -from nemo.collections.nlp.parts.nlp_overrides import GradScaler from nemo.collections.tts.helpers.helpers import plot_spectrogram_to_numpy from nemo.collections.tts.losses.vits_losses import DiscriminatorLoss, FeatureLoss, GeneratorLoss, KlLoss from nemo.collections.tts.models.base import TextToWaveform @@ -20,23 +15,14 @@ from nemo.core.classes.common import PretrainedModelInfo from nemo.utils import logging, model_utils +# TODO: remove if not needed # to call optimizer_step -def closure(): - return - -@dataclass -class VitsConfig: - parser: Dict[Any, Any] = MISSING - preprocessor: Dict[Any, Any] = MISSING - input_fft: Dict[Any, Any] = MISSING - output_fft: Dict[Any, Any] = MISSING - duration_predictor: Dict[Any, Any] = MISSING - pitch_predictor: Dict[Any, Any] = MISSING +# def closure(): +# return class VitsModel(TextToWaveform): def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): - # Convert to Hydra 1.0 compatible DictConfig cfg = model_utils.convert_model_config_to_dict_config(cfg) cfg = model_utils.maybe_update_config_version(cfg) @@ -55,26 +41,12 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): num_tokens = len(self.tokenizer.tokens) self.tokenizer_pad = self.tokenizer.pad self.tokenizer_unk = self.tokenizer.oov - - # self.scaler = trainer.precision_plugin.scaler super().__init__(cfg=cfg, trainer=trainer) - - schema = OmegaConf.structured(VitsConfig) - # ModelPT ensures that cfg is a DictConfig, but do this second check in case ModelPT changes - if isinstance(cfg, dict): - cfg = OmegaConf.create(cfg) - elif not isinstance(cfg, DictConfig): - raise ValueError(f"cfg was type: {type(cfg)}. Expected either a dict or a DictConfig") - # Ensure passed cfg is compliant with schema - OmegaConf.merge(cfg, schema) self.audio_to_melspec_precessor = instantiate(cfg.preprocessor, highfreq=cfg.train_ds.dataset.highfreq) - self.encoder = instantiate(cfg.input_fft) - self.duration_predictor = instantiate(cfg.duration_predictor) - self.pitch_predictor = instantiate(cfg.pitch_predictor) - + # TODO: how model knows padding idx? num tokens? 
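        # (A note on the objects assembled here: this is the standard adversarial TTS
        # pairing -- the generator synthesizes waveforms while MultiPeriodDiscriminator
        # scores real vs. generated slices. DiscriminatorLoss/GeneratorLoss implement the
        # least-squares GAN objectives, roughly E[(1 - D(y))^2] + E[D(y_hat)^2] for the
        # discriminator and E[(1 - D(y_hat))^2] for the generator, while FeatureLoss and
        # KlLoss add the feature-matching and variational terms summed in training_step.)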
self.generator = instantiate(cfg.generator) self.multiperioddisc = MultiPeriodDiscriminator() self.feat_matching_loss = FeatureLoss() @@ -82,19 +54,6 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): self.gen_loss = GeneratorLoss() self.kl_loss = KlLoss() - self.max_token_duration = cfg.max_token_duration - - self.pitch_emb = torch.nn.Conv1d( - 1, - cfg.symbols_embedding_dim, - kernel_size=cfg.pitch_embedding_kernel_size, - padding=int((cfg.pitch_embedding_kernel_size - 1) / 2), - ) - - # Store values precomputed from training data for convenience - self.register_buffer('pitch_mean', torch.zeros(1)) - self.register_buffer('pitch_std', torch.zeros(1)) - self.log_train_images = False self.logged_real_samples = False self._tb_logger = None @@ -141,7 +100,8 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): window=window_fn(self.win_length, periodic=False).to(torch.float) if window_fn else None, ) - self.precision_plugin = self.trainer.accelerator.precision_plugin # to call optimizer_step + # TODO: remove if not needed + # self.precision_plugin = self.trainer.accelerator.precision_plugin # to call optimizer_step def _setup_normalizer(self, cfg): if "text_normalizer" in cfg: @@ -181,12 +141,12 @@ def parse(self, str_input: str) -> torch.tensor: pass def configure_optimizers(self): - self.optim_g = torch.optim.AdamW( + optim_g = torch.optim.AdamW( self.net_g.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps) - self.optim_d = torch.optim.AdamW( + optim_d = torch.optim.AdamW( self.net_d.parameters(), self._cfg.lr, betas=self._cfg.betas, @@ -202,9 +162,10 @@ def configure_optimizers(self): 'scheduler': scheduler_d, 'interval': 'step' } - return [self.optim_g, self.optim_d], [scheduler_g_dict, scheduler_d_dict] + return [optim_g, optim_d], [scheduler_g_dict, scheduler_d_dict] def forward(self, batch, batch_idx): + # TODO: Check if this is correct with torch.no_grad(): (x, x_lengths, spec, spec_lengths, y, y_lengths) = batch @@ -217,7 +178,6 @@ def forward(self, batch, batch_idx): return y_hat, y_hat_lengths - def get_spec(self, audio): with torch.cuda.amp.autocast(enabled=False): spec = self.stft(audio) @@ -227,8 +187,8 @@ def get_spec(self, audio): return spec def training_step(self, batch, batch_idx): + # TODO: support accum gradient or don't allow to use accum gradient in init - (y, y_lengths, x, x_lengths) = batch spec = self.get_spec(y) @@ -244,7 +204,7 @@ def training_step(self, batch, batch_idx): self._cfg.n_mel_channels, self._cfg.sample_rate, self._cfg.mel_fmin, - self._cfg.mel_fmax + self._cfg.mel_fmax, ) y_mel = slice_segments(mel, ids_slice, self._cfg.segment_size // self.cfg.n_window_stride) @@ -257,34 +217,28 @@ def training_step(self, batch, batch_idx): self.cfg.n_window_stride, self._cfg.preprocessor.n_window_size, self._cfg.mel_fmin, - self._cfg.mel_fmax + self._cfg.mel_fmax, ) - y = torch.unsqueeze(y, 1) - y = slice_segments(y, ids_slice * self.cfg.n_window_stride, self._cfg.segment_size) # slice + y = slice_segments(y, ids_slice * self.cfg.n_window_stride, self._cfg.segment_size) y_d_hat_r, y_d_hat_g, _, _ = self.net_d(y, y_hat.detach()) with autocast(enabled=False): loss_disc, losses_disc_r, losses_disc_g = self.disc_loss(y_d_hat_r, y_d_hat_g) loss_disc_all = loss_disc - # train discriminator - # self.optim_d.zero_grad() - # self.scaler.scale(loss_disc_all).backward() - # self.scaler.unscale_(self.optim_d) + # get optimizers + optimizers, _ = self.optimizers() + optim_g, optim_d = optimizers - # norm_d = 
clip_grad_value_(self.net_d.parameters(), None)#self.cfg.trainer.gradient_clip_val) - # self.scaler.update() - # self.scaler.step(self.optim_d) - - self.optim_d.zero_grad() + # train discriminator + optim_d.zero_grad() self.manual_backward(loss_disc_all) - self.precision_plugin.optimizer_step(self, self.optim_d, 0, closure) # dunno why + optim_d.step() + # TODO: maybe change it to PTL-based function norm_d = clip_grad_value_(self.net_d.parameters(), None) - # print('grad_d', norm_d) - with autocast(enabled=True): # Generator y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = self.net_d(y, y_hat) @@ -296,24 +250,12 @@ def training_step(self, batch, batch_idx): loss_gen, losses_gen = self.gen_loss(disc_outputs=y_d_hat_g) loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl - - # train generator - - - # self.optim_g.zero_grad() - # self.scaler.scale(loss_gen_all).backward() - # self.scaler.unscale_(self.optim_g) - # norm_g = clip_grad_value_(self.net_g.parameters(), None) # self.cfg.trainer.gradient_clip_val) - # self.scaler.update() - # self.scaler.step(self.optim_g) - - self.optim_g.zero_grad() + optim_g.zero_grad() self.manual_backward(loss_gen_all) - self.precision_plugin.optimizer_step(self, self.optim_g, 1, closure) # dunno why + optim_g.step() norm_g = clip_grad_value_(self.net_g.parameters(), None) - # print('grad_g', norm_g) schedulers = self.lr_schedulers() if schedulers is not None: sch1, sch2 = schedulers @@ -333,19 +275,20 @@ def training_step(self, batch, batch_idx): } for i, v in enumerate(losses_gen): - metrics["loss_gen_i_{}".format(i)] = v + metrics[f"loss_gen_i_{i}"] = v for i, v in enumerate(losses_disc_r): - metrics["loss_disc_r_{}".format(i)] = v + metrics[f"loss_disc_r_{i}"] = v for i, v in enumerate(losses_disc_g): - metrics["loss_disc_g_{}".format(i)] = v + metrics[f"loss_disc_g_{i}"] = v self.log_dict(metrics, on_step=True, sync_dist=True) def validation_step(self, batch, batch_idx): (y, y_lengths, x, x_lengths) = batch + # TODO: fix hardcode y_hat, attn, mask, *_ = self.net_g.infer(x, x_lengths, max_len=1000) y_hat = y_hat.squeeze() y_hat_lengths = mask.sum([1, 2]).long() * self._cfg.train_ds.dataset.hop_length @@ -383,7 +326,6 @@ def validation_step(self, batch, batch_idx): ] self.logger.experiment.log({"specs": specs, "audios": audios}) - def _loader(self, cfg): try: diff --git a/nemo/collections/tts/modules/vits_mel_processing.py b/nemo/collections/tts/modules/vits_mel_processing.py deleted file mode 100644 index 0d8bc0c15522..000000000000 --- a/nemo/collections/tts/modules/vits_mel_processing.py +++ /dev/null @@ -1,77 +0,0 @@ -import torch -import torch.utils.data -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global hann_window 
- dtype_device = str(y.dtype) + '_' + str(y.device) - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global mel_basis - dtype_device = str(spec.dtype) + '_' + str(spec.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def audio_to_mel_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - spec = spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center) - melspec = spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax) - return melspec \ No newline at end of file diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index 9545024fc195..a5676df2a338 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -1,4 +1,3 @@ -import numpy as np import math import numpy as np @@ -6,15 +5,26 @@ from torch import nn from torch.nn import Conv1d, ConvTranspose1d, Conv2d from torch.nn import functional as F +from librosa.filters import mel as librosa_mel_fn from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm from nemo.collections.tts.modules.monotonic_align import maximum_path -from nemo.collections.tts.modules.vits_mel_processing import librosa_mel_fn, spectral_normalize_torch + +# TODO: LARGE refactoring LRELU_SLOPE = 0.1 +def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): + return torch.log(torch.clamp(x, min=clip_val) * C) + + +def spectral_normalize_torch(magnitudes): + output = dynamic_range_compression_torch(magnitudes) + return output + + class LayerNorm(nn.Module): def __init__(self, channels, eps=1e-5): super().__init__() From 70f317155f26d2e7847fbf6d33c0ccda506432b0 Mon Sep 17 00:00:00 2001 From: Oktai Tatanov Date: Sun, 30 Jan 2022 11:48:22 +0300 Subject: [PATCH 060/244] fix small bugs and add new todos Signed-off-by: Oktai Tatanov --- examples/tts/conf/vits.yaml | 5 +--- nemo/collections/tts/models/vits.py | 5 ++-- nemo/collections/tts/modules/vits_modules.py | 24 ++++++++------------ 3 files changed, 13 insertions(+), 21 deletions(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index e8662dbf8423..3eae3d6dfe38 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -2,7 +2,7 @@ # If you want to train model on other dataset, you can change config values according to your dataset. # Most dataset-specific arguments are in the head of the config file, see below. 
-# TODO: remove unnecessary arguments +# TODO: remove unnecessary arguments, refactoring name: VITS @@ -163,7 +163,6 @@ model: num_workers: 1 pin_memory: false - preprocessor: # TODO: change to STFT _target_: nemo.collections.asr.parts.preprocessing.features.FilterbankFeatures @@ -173,7 +172,6 @@ model: log_zero_guard_type: clamp log_zero_guard_value: 1e-05 lowfreq: ${model.lowfreq} - n_fft: ${model.n_fft} n_window_size: ${model.n_window_size} n_window_stride: ${model.n_window_stride} @@ -188,7 +186,6 @@ model: stft_conv: false nb_augmentation_prob : 0 mag_power: 1.0 - exact_pad: true use_grads: false diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 23480d3b8c13..f666681de02b 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -46,8 +46,6 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): self.audio_to_melspec_precessor = instantiate(cfg.preprocessor, highfreq=cfg.train_ds.dataset.highfreq) - # TODO: how model knows padding idx? num tokens? - self.generator = instantiate(cfg.generator) self.multiperioddisc = MultiPeriodDiscriminator() self.feat_matching_loss = FeatureLoss() self.disc_loss = DiscriminatorLoss() @@ -63,6 +61,8 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): self.n_fft = cfg.train_ds.dataset.n_fft self.win_length = cfg.train_ds.dataset.win_length + # TODO: need to add SynthesizerTrn in config + # TODO: how model knows padding idx? num tokens? self.net_g = SynthesizerTrn( n_vocab = cfg.symbols_embedding_dim, spec_channels = cfg.train_ds.dataset.n_fft // 2 + 1, @@ -188,7 +188,6 @@ def get_spec(self, audio): def training_step(self, batch, batch_idx): # TODO: support accum gradient or don't allow to use accum gradient in init - (y, y_lengths, x, x_lengths) = batch spec = self.get_spec(y) diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index a5676df2a338..7cff88952748 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -10,7 +10,7 @@ from nemo.collections.tts.modules.monotonic_align import maximum_path -# TODO: LARGE refactoring +# TODO: need to do LARGE refactoring LRELU_SLOPE = 0.1 @@ -192,7 +192,7 @@ def remove_weight_norm(self): for l in self.res_skip_layers: torch.nn.utils.remove_weight_norm(l) - +# TODO: reuse from hifigan if it is possible? class ResBlock1(torch.nn.Module): def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): super(ResBlock1, self).__init__() @@ -237,7 +237,7 @@ def remove_weight_norm(self): for l in self.convs2: remove_weight_norm(l) - +# TODO: reuse from hifigan if it is possible? class ResBlock2(torch.nn.Module): def __init__(self, channels, kernel_size=3, dilation=(1, 3)): super(ResBlock2, self).__init__() @@ -538,6 +538,7 @@ def __init__(self, self.kernel_size = kernel_size self.p_dropout = p_dropout + # TODO: specify padding idx self.emb = nn.Embedding(n_vocab, hidden_channels) nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) @@ -626,7 +627,7 @@ def forward(self, x, x_lengths, g=None): z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask return z, m, logs, x_mask - +# TODO: reuse from hifigan if it is possible? 
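# (Context for the TODO above: this Generator is the HiFi-GAN vocoder architecture --
# weight-normed transposed-conv upsampling stages interleaved with multi-dilation
# ResBlocks, matching the resblock/upsample_* settings in vits.yaml -- which is why
# reusing NeMo's existing hifigan modules is suggested.)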
class Generator(torch.nn.Module): def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): super(Generator, self).__init__() @@ -681,7 +682,7 @@ def remove_weight_norm(self): for l in self.resblocks: l.remove_weight_norm() - +# TODO: reuse from hifigan if it is possible? class DiscriminatorP(torch.nn.Module): def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): super(DiscriminatorP, self).__init__() @@ -718,7 +719,7 @@ def forward(self, x): return x, fmap - +# TODO: reuse from hifigan if it is possible? class DiscriminatorS(torch.nn.Module): def __init__(self, use_spectral_norm=False): super(DiscriminatorS, self).__init__() @@ -746,7 +747,7 @@ def forward(self, x): return x, fmap - +# TODO: reuse from hifigan if it is possible? class MultiPeriodDiscriminator(torch.nn.Module): def __init__(self, use_spectral_norm=False): super(MultiPeriodDiscriminator, self).__init__() @@ -907,6 +908,7 @@ def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_sca o = self.dec((z * y_mask)[:,:,:max_len], g=g) return o, attn, y_mask, (z, z_p, m_p, logs_p) + # TODO: do we really need it? def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): assert self.n_speakers > 0, "n_speakers have to be larger than 0." g_src = self.emb_g(sid_src).unsqueeze(-1) @@ -980,12 +982,6 @@ def get_padding(kernel_size, dilation=1): return int((kernel_size*dilation - dilation)/2) -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - def intersperse(lst, item): result = [item] * (len(lst) * 2 + 1) result[1::2] = lst @@ -1083,7 +1079,7 @@ def shift_1d(x): x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] return x - +# TODO: reuse from helpers get_mask_from_lengths? def sequence_mask(length, max_length=None): if max_length is None: max_length = length.max() From 6be1cee0c7d2b90275a075a304e6780dd776c085 Mon Sep 17 00:00:00 2001 From: Oktai Tatanov Date: Sun, 30 Jan 2022 18:04:52 +0300 Subject: [PATCH 061/244] fix optimizers Signed-off-by: Oktai Tatanov --- nemo/collections/tts/models/vits.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index f666681de02b..a1f6dd94faef 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -228,8 +228,7 @@ def training_step(self, batch, batch_idx): loss_disc_all = loss_disc # get optimizers - optimizers, _ = self.optimizers() - optim_g, optim_d = optimizers + optim_g, optim_d = self.optimizers() # train discriminator optim_d.zero_grad() From 3f8ca4c2afd901fa0ae2cfa518af683d4a00ef4b Mon Sep 17 00:00:00 2001 From: jasonjjl1999 <43978361+jasonjjl1999@users.noreply.github.com> Date: Thu, 3 Feb 2022 11:30:32 -0500 Subject: [PATCH 062/244] Port Variational Inference with Adversarial Learning (VITS) to NeMo TTS (#6) * Add vits files Add vits_losses.py, vits_modules.py and vits.py. 
* Move non-vits models to modules * Add vits.yaml * Add _loader to vits.py * Add basic template for vits * Update vits.yaml with vits parameters * Remove extra space * Add top level training script * Add some variables to vits yaml * Add forward and training methods * Fix imports * Added validation step * Log training losses * Update loss calls to use class attributes * Add VITS to models list * Fix all imports * Remove old module calls * Fix typo in monotonic align import * Modified validation step 1. reverted to tensorboard 2. validation_step logs audio, mel-spec for batch 0 3. validation_step_alt logs audio, mel-spec for batch 0 and loss_mel * Fix imports for VITS * Remove old module calls * Fix typo in monotonic align import * Modified validation step 1. reverted to tensorboard 2. validation_step logs audio, mel-spec for batch 0 3. validation_step_alt logs audio, mel-spec for batch 0 and loss_mel * Add parameters from original VITS config * Fix config file * Fix imports and generate spec from audio * Fix incorrect dimensions * Progress update * Fix loss * Fix cuda thing * Fix monotonic align import * Fix typos in vits.py * Disable loss typecheck * Fix spectrogram lengths * Remove Precision 16 requirement * Address lgtm alerts * clean up unused code * Address lgtm alerts * Refactor audio_to_mel_torch method * Use NeMo FilterBank to get melspec Todo: set self.fb * Fix filterbank max frequency to match with original VITS * Fix filterbank features correct length * Address lgtm issues * Remove print statements * Remove stft_pad_amount Co-authored-by: martynwei Co-authored-by: Ryan Hong <66425733+rhong99@users.noreply.github.com> Co-authored-by: richa.ren@mail.utoronto.ca Co-authored-by: Jason Signed-off-by: Jason --- examples/tts/conf/vits.yaml | 257 + examples/tts/vits.py | 35 + .../asr/parts/preprocessing/features.py | 1 - nemo/collections/tts/losses/vits_losses.py | 126 + nemo/collections/tts/models/__init__.py | 2 + nemo/collections/tts/models/vits.py | 330 + .../tts/modules/monotonic_align/__init__.py | 19 + .../tts/modules/monotonic_align/core.c | 21299 ++++++++++++++++ .../tts/modules/monotonic_align/core.pyx | 42 + .../tts/modules/monotonic_align/setup.py | 9 + .../tts/modules/vits_mel_processing.py | 77 + nemo/collections/tts/modules/vits_modules.py | 1603 ++ setup.py | 13 + 13 files changed, 23812 insertions(+), 1 deletion(-) create mode 100644 examples/tts/conf/vits.yaml create mode 100644 examples/tts/vits.py create mode 100644 nemo/collections/tts/losses/vits_losses.py create mode 100644 nemo/collections/tts/models/vits.py create mode 100644 nemo/collections/tts/modules/monotonic_align/__init__.py create mode 100644 nemo/collections/tts/modules/monotonic_align/core.c create mode 100644 nemo/collections/tts/modules/monotonic_align/core.pyx create mode 100644 nemo/collections/tts/modules/monotonic_align/setup.py create mode 100644 nemo/collections/tts/modules/vits_mel_processing.py create mode 100644 nemo/collections/tts/modules/vits_modules.py diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml new file mode 100644 index 000000000000..20146d0902ef --- /dev/null +++ b/examples/tts/conf/vits.yaml @@ -0,0 +1,257 @@ +name: "VITS" + +labels: [' ', '!', "'", '(', ')', ',', '-', '.', ':', ';', '?', 'a', 'b', 'c', 'd', 'e', 'f', 'g', + 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'] +train_dataset: ??? +validation_datasets: ??? 
+test_datasets: null +sample_rate: 22050 +sup_data_path: null +sup_data_types: null + +# LJSpeech stats (per frame), train +pitch_mean: 212.35873413085938 +pitch_std: 68.52806091308594 + +# default values from librosa.pyin +pitch_fmin: 65.40639132514966 +pitch_fmax: 2093.004522404789 + +# default values for sample_rate=22050 +n_mels: 80 +n_window_size: 1024 +n_window_stride: 256 +n_fft: 1024 +lowfreq: 0 +highfreq: null +window: "hann" + +pitch_loss_scale: 0.1 +durs_loss_scale: 0.1 +mel_loss_scale: 1.0 + +phoneme_dict_path: null # "scripts/tts_dataset_files/cmudict-0.7b-030921" +heteronyms_path: null # "scripts/tts_dataset_files/heteronyms-030921" + +filter_channels: 768 +filter_length: 1024 + +model: + sample_rate: 22050 + splice_length: 64 + lr: 2e-4 + labels: ${labels} + n_speakers: 1 + symbols_embedding_dim: 384 + max_token_duration: 75 + n_mel_channels: ${n_mels} + pitch_embedding_kernel_size: 3 + mel_loss_coeff: 40 + hop_size: 256 + log_interval: 200 + eval_interval: 1000 + seed: 1234 + betas: [0.8,0.99] + eps: 1e-9 + lr_decay: 0.999875 + segment_size: 8192 + init_lr_ratio: 1 + warmup_epochs: 0 + c_mel: 45 + c_kl: 1. + inter_channels: 192 + hidden_channels: 192 + filter_channels: ${filter_channels} + filter_length: ${filter_length} + n_heads: 2 + p_dropout: 0.1 + n_layers_q: 3 + n_layers: 6 + use_spectral_norm: false + mel_fmin: 0.0 + mel_fmax: null + + train_ds: + dataset: + _target_: "nemo.collections.tts.torch.data.MixerTTSDataset" + manifest_filepath: ${train_dataset} + sample_rate: ${sample_rate} + sup_data_path: ${sup_data_path} + sup_data_types: ${sup_data_types} + n_fft: ${n_fft} + win_length: ${n_window_size} + hop_length: ${n_window_stride} + window: ${window} + n_mels: ${n_mels} + lowfreq: ${lowfreq} + highfreq: ${highfreq} + max_duration: null + min_duration: 0.1 + ignore_file: null + trim: False + pitch_fmin: ${pitch_fmin} + pitch_fmax: ${pitch_fmax} + + text_tokenizer: + _target_: "nemo.collections.tts.torch.tts_tokenizers.EnglishPhonemesTokenizer" + punct: True + stresses: True + chars: True + space: ' ' + silence: null + apostrophe: True + sep: '|' + add_blank_at: null + pad_with_space: True + g2p: + _target_: "nemo.collections.tts.torch.g2ps.EnglishG2p" + phoneme_dict: ${phoneme_dict_path} + heteronyms: ${heteronyms_path} + dataloader_params: + drop_last: false + shuffle: true + batch_size: 2 + num_workers: 4 + pin_memory: false + + validation_ds: + dataset: + _target_: "nemo.collections.tts.torch.data.MixerTTSDataset" + manifest_filepath: ${validation_datasets} + sample_rate: ${sample_rate} + sup_data_path: ${sup_data_path} + sup_data_types: ${sup_data_types} + n_fft: ${n_fft} + win_length: ${n_window_size} + hop_length: ${n_window_stride} + window: ${window} + n_mels: ${n_mels} + lowfreq: ${lowfreq} + highfreq: ${highfreq} + max_duration: null + min_duration: 0.1 + ignore_file: null + trim: False + pitch_fmin: ${pitch_fmin} + pitch_fmax: ${pitch_fmax} + + text_tokenizer: + _target_: "nemo.collections.tts.torch.tts_tokenizers.EnglishPhonemesTokenizer" + punct: True + stresses: True + chars: True + space: ' ' + silence: null + apostrophe: True + sep: '|' + add_blank_at: null + pad_with_space: True + g2p: + _target_: "nemo.collections.tts.torch.g2ps.EnglishG2p" + phoneme_dict: ${phoneme_dict_path} + heteronyms: ${heteronyms_path} + + dataloader_params: + drop_last: false + shuffle: true + batch_size: 2 + num_workers: 1 + pin_memory: false + + + preprocessor: + _target_: nemo.collections.asr.parts.preprocessing.features.FilterbankFeatures + dither: 0.0 + nfilt: 
${model.n_mel_channels} + frame_splicing: 1 + highfreq: 8000 + log: true + log_zero_guard_type: clamp + log_zero_guard_value: 1e-05 + lowfreq: 0 + mag_power: 1.0 + n_fft: 1024 + n_window_size: ${n_window_size} + n_window_stride: ${model.hop_size} + normalize: null + pad_to: 1 + pad_value: 0 + preemph: null + stft_conv: false + nb_augmentation_prob : 0 + sample_rate: ${model.sample_rate} + window: hann + exact_pad: true + use_grads: false + + input_fft: + _target_: nemo.collections.tts.modules.transformer.FFTransformerEncoder + n_layer: 6 + n_head: 1 + d_model: ${model.symbols_embedding_dim} + d_head: 64 + d_inner: 1536 + kernel_size: 3 + dropout: 0.1 + dropatt: 0.1 + dropemb: 0.0 + n_embed: 148 # NOTE Should match # of tokens in `symbol_set` + d_embed: ${model.symbols_embedding_dim} + padding_idx: 0 + + output_fft: + _target_: nemo.collections.tts.modules.transformer.FFTransformerDecoder + n_layer: 6 + n_head: 1 + d_model: ${model.symbols_embedding_dim} + d_head: 64 + d_inner: 1536 + kernel_size: 3 + dropout: 0.1 + dropatt: 0.1 + dropemb: 0.0 + + duration_predictor: + _target_: nemo.collections.tts.modules.vits_modules.StochasticDurationPredictor + in_channels: ${model.symbols_embedding_dim} # input_size: ${model.symbols_embedding_dim} + kernel_size: 3 + filter_channels: ${filter_length} # filter_size: 256 + p_dropout: 0.1 # dropout: 0.1 + # n_layers: 6 + + pitch_predictor: + _target_: nemo.collections.tts.modules.fastpitch.TemporalPredictor + input_size: ${model.symbols_embedding_dim} + kernel_size: 3 + filter_size: ${filter_length} + dropout: 0.1 + n_layers: ${model.n_layers} + + generator: + _target_: nemo.collections.tts.modules.vits_modules.Generator + resblock: "1" + resblock_kernel_sizes: [3,7,11] + resblock_dilation_sizes: [[1,3,5], [1,3,5], [1,3,5]] + upsample_rates: [8,8,2,2] + upsample_initial_channel: 512 + upsample_kernel_sizes: [16,16,4,4] + initial_channel: 384 # initial_input_size: 384 + +trainer: + gpus: -1 # number of gpus + max_epochs: 20000 + num_nodes: 1 + accelerator: ddp + accumulate_grad_batches: 1 + checkpoint_callback: False # Provided by exp_manager + logger: False # Provided by exp_manager + # gradient_clip_val: 1000.0 + flush_logs_every_n_steps: 1000 + log_every_n_steps: 100 + check_val_every_n_epoch: 5 + +exp_manager: + exp_dir: null + name: ${name} + create_tensorboard_logger: True + create_checkpoint_callback: True diff --git a/examples/tts/vits.py b/examples/tts/vits.py new file mode 100644 index 000000000000..24219dbd97d5 --- /dev/null +++ b/examples/tts/vits.py @@ -0,0 +1,35 @@ +# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
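+#
+# Example invocation (a sketch; the manifest paths below are placeholders, not
+# files shipped with this patch). `train_dataset` and `validation_datasets` are
+# the mandatory fields of conf/vits.yaml, and any other config field can be
+# overridden through Hydra in the same dotted-key style:
+#
+#   python examples/tts/vits.py \
+#       train_dataset=/path/to/train_manifest.json \
+#       validation_datasets=/path/to/val_manifest.json \
+#       trainer.max_epochs=1000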
+ +import pytorch_lightning as pl + +from nemo.collections.common.callbacks import LogEpochTimeCallback +from nemo.collections.tts.models.vits import VitsModel +from nemo.core.config import hydra_runner +from nemo.utils.exp_manager import exp_manager + + +@hydra_runner(config_path="conf", config_name="vits") +def main(cfg): + trainer = pl.Trainer(**cfg.trainer) + exp_manager(trainer, cfg.get("exp_manager", None)) + model = VitsModel(cfg=cfg.model, trainer=trainer) + lr_logger = pl.callbacks.LearningRateMonitor() + epoch_time_logger = LogEpochTimeCallback() + trainer.callbacks.extend([lr_logger, epoch_time_logger]) + trainer.fit(model) + + +if __name__ == '__main__': + main() # noqa pylint: disable=no-value-for-parameter diff --git a/nemo/collections/asr/parts/preprocessing/features.py b/nemo/collections/asr/parts/preprocessing/features.py index b2c9bfed4d2d..c706f9d6362a 100644 --- a/nemo/collections/asr/parts/preprocessing/features.py +++ b/nemo/collections/asr/parts/preprocessing/features.py @@ -422,7 +422,6 @@ def forward(self, x, seq_len): # dot with filterbank energies x = torch.matmul(self.fb.to(x.dtype), x) - # log features if required if self.log: if self.log_zero_guard_type == "add": diff --git a/nemo/collections/tts/losses/vits_losses.py b/nemo/collections/tts/losses/vits_losses.py new file mode 100644 index 000000000000..10e1f7836aec --- /dev/null +++ b/nemo/collections/tts/losses/vits_losses.py @@ -0,0 +1,126 @@ +import torch + +from nemo.core.classes import Loss, typecheck +from nemo.core.neural_types.elements import LossType, VoidType +from nemo.core.neural_types.neural_type import NeuralType + + +class FeatureLoss(Loss): + def input_types(self): + return { + "fmap_r": [[NeuralType(elements_type=VoidType())]], + "fmap_g": [[NeuralType(elements_type=VoidType())]], + } + + @property + def output_types(self): + return { + "loss": NeuralType(elements_type=LossType()), + } + + # @typecheck() + def forward(self, fmap_r, fmap_g): + loss = 0 + for dr, dg in zip(fmap_r, fmap_g): + for rl, gl in zip(dr, dg): + rl = rl.float().detach() + gl = gl.float() + loss += torch.mean(torch.abs(rl - gl)) + + return loss * 2 + + +class DiscriminatorLoss(Loss): + @property + def input_types(self): + return { + "disc_real_outputs": [NeuralType(('B', 'T'), VoidType())], + "disc_generated_outputs": [NeuralType(('B', 'T'), VoidType())], + } + + @property + def output_types(self): + return { + "loss": NeuralType(elements_type=LossType()), + "real_losses": [NeuralType(elements_type=LossType())], + "fake_losses": [NeuralType(elements_type=LossType())], + } + + def forward(self, disc_real_outputs, disc_generated_outputs): + loss = 0 + r_losses = [] + g_losses = [] + for dr, dg in zip(disc_real_outputs, disc_generated_outputs): + dr = dr.float() + dg = dg.float() + r_loss = torch.mean((1-dr)**2) + g_loss = torch.mean(dg**2) + loss += (r_loss + g_loss) + r_losses.append(r_loss.item()) + g_losses.append(g_loss.item()) + + return loss, r_losses, g_losses + + +class GeneratorLoss(Loss): + """Generator Loss module""" + + @property + def input_types(self): + return { + "disc_outputs": [NeuralType(('B', 'T'), VoidType())], + } + + @property + def output_types(self): + return { + "loss": NeuralType(elements_type=LossType()), + "fake_losses": [NeuralType(elements_type=LossType())], + } + + @typecheck() + def forward(self, disc_outputs): + loss = 0 + gen_losses = [] + for dg in disc_outputs: + l = torch.mean((1 - dg) ** 2) + gen_losses.append(l) + loss += l + + return loss, gen_losses + + +class KlLoss(Loss): + 
@property + def input_types(self): + return { + "z_p": [NeuralType(('B', 'D', 'T'), VoidType())], + "logs_q": [NeuralType(('B', 'D', 'T'), VoidType())], + "m_p": [NeuralType(('B', 'D', 'T'), VoidType())], + "logs_p": [NeuralType(('B', 'D', 'T'), VoidType())], + "z_mask": [NeuralType(('B', 'D', 'T'), VoidType())], + } + + @property + def output_types(self): + return { + "loss": NeuralType(elements_type=LossType()), + } + + @typecheck() + def forward(self, z_p, logs_q, m_p, logs_p, z_mask): + """ + z_p, logs_q: [b, h, t_t] + m_p, logs_p: [b, h, t_t] + """ + z_p = z_p.float() + logs_q = logs_q.float() + m_p = m_p.float() + logs_p = logs_p.float() + z_mask = z_mask.float() + + kl = logs_p - logs_q - 0.5 + kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p) + kl = torch.sum(kl * z_mask) + l = kl / torch.sum(z_mask) + return l diff --git a/nemo/collections/tts/models/__init__.py b/nemo/collections/tts/models/__init__.py index cb5df94a966d..8ca3be12f435 100644 --- a/nemo/collections/tts/models/__init__.py +++ b/nemo/collections/tts/models/__init__.py @@ -30,6 +30,7 @@ from nemo.collections.tts.models.uniglow import UniGlowModel from nemo.collections.tts.models.waveglow import WaveGlowModel from nemo.collections.tts.models.mixer_tts import MixerTTSModel + from nemo.collections.tts.models.vits import VitsModel from nemo.collections.tts.models.univnet import UnivNetModel except ModuleNotFoundError: pass @@ -56,5 +57,6 @@ "FastSpeech2HifiGanE2EModel", "AlignerModel", "MixerTTSModel", + "VitsModel", "UnivNetModel", ] diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py new file mode 100644 index 000000000000..684d41d53a37 --- /dev/null +++ b/nemo/collections/tts/models/vits.py @@ -0,0 +1,330 @@ +import omegaconf +import torch +from dataclasses import dataclass +from hydra.utils import instantiate +from omegaconf import MISSING, DictConfig, OmegaConf +from pytorch_lightning import Trainer +from torch.cuda.amp import autocast +from torch.nn import functional as F +from typing import Any, Dict + +from nemo.collections.tts.helpers.helpers import plot_spectrogram_to_numpy +from nemo.collections.tts.losses.vits_losses import DiscriminatorLoss, FeatureLoss, GeneratorLoss, KlLoss +from nemo.collections.tts.models.base import TextToWaveform +from nemo.collections.tts.modules.vits_modules import SynthesizerTrn, MultiPeriodDiscriminator, spec_to_mel_torch, slice_segments, clip_grad_value_ +from nemo.core.classes.common import PretrainedModelInfo +from nemo.utils import logging + + +@dataclass +class VitsConfig: + parser: Dict[Any, Any] = MISSING + preprocessor: Dict[Any, Any] = MISSING + input_fft: Dict[Any, Any] = MISSING + output_fft: Dict[Any, Any] = MISSING + duration_predictor: Dict[Any, Any] = MISSING + pitch_predictor: Dict[Any, Any] = MISSING + + +class VitsModel(TextToWaveform): + def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): + + if isinstance(cfg, dict): + cfg = OmegaConf.create(cfg) + + super().__init__(cfg=cfg, trainer=trainer) + + schema = OmegaConf.structured(VitsConfig) + # ModelPT ensures that cfg is a DictConfig, but do this second check in case ModelPT changes + if isinstance(cfg, dict): + cfg = OmegaConf.create(cfg) + elif not isinstance(cfg, DictConfig): + raise ValueError(f"cfg was type: {type(cfg)}. 
Expected either a dict or a DictConfig") + # Ensure passed cfg is compliant with schema + OmegaConf.merge(cfg, schema) + + self.audio_to_melspec_precessor = instantiate(cfg.preprocessor, highfreq=cfg.train_ds.dataset.highfreq) + + self.encoder = instantiate(cfg.input_fft) + self.duration_predictor = instantiate(cfg.duration_predictor) + self.pitch_predictor = instantiate(cfg.pitch_predictor) + + self.generator = instantiate(cfg.generator) + self.multiperioddisc = MultiPeriodDiscriminator() + self.feat_matching_loss = FeatureLoss() + self.disc_loss = DiscriminatorLoss() + self.gen_loss = GeneratorLoss() + self.kl_loss = KlLoss() + + self.max_token_duration = cfg.max_token_duration + + self.pitch_emb = torch.nn.Conv1d( + 1, + cfg.symbols_embedding_dim, + kernel_size=cfg.pitch_embedding_kernel_size, + padding=int((cfg.pitch_embedding_kernel_size - 1) / 2), + ) + + # Store values precomputed from training data for convenience + self.register_buffer('pitch_mean', torch.zeros(1)) + self.register_buffer('pitch_std', torch.zeros(1)) + + self.mel_loss_coeff = cfg.mel_loss_coeff + + self.log_train_images = False + self.logged_real_samples = False + self._tb_logger = None + self.hann_window = None + self.splice_length = cfg.splice_length + self.sample_rate = cfg.sample_rate + self.hop_size = cfg.hop_size + self.n_fft = cfg.train_ds.dataset.n_fft + self.win_length = cfg.train_ds.dataset.win_length + + self.net_g = SynthesizerTrn( + n_vocab = cfg.symbols_embedding_dim, + spec_channels = cfg.train_ds.dataset.n_fft // 2 + 1, + segment_size = cfg.segment_size // cfg.train_ds.dataset.hop_length, + inter_channels = cfg.inter_channels, + hidden_channels = cfg.hidden_channels, + filter_channels = cfg.filter_channels, + n_heads = cfg.n_heads, + n_layers = cfg.n_layers, + kernel_size = cfg.pitch_embedding_kernel_size, + p_dropout = cfg.p_dropout, + resblock = cfg.generator.resblock, + resblock_kernel_sizes = cfg.generator.resblock_kernel_sizes, + resblock_dilation_sizes = cfg.generator.resblock_dilation_sizes, + upsample_rates = cfg.generator.upsample_rates, + upsample_initial_channel = cfg.generator.upsample_initial_channel, + upsample_kernel_sizes = cfg.generator.upsample_kernel_sizes, + ) + self.net_d = MultiPeriodDiscriminator(cfg.use_spectral_norm) + self.automatic_optimization = False + + window_fn = { + 'hann': torch.hann_window, + 'hamming': torch.hamming_window, + 'blackman': torch.blackman_window, + 'bartlett': torch.bartlett_window, + 'none': None, + }.get(self.hann_window, None) + + self.stft = lambda x: torch.stft( + input=x, + n_fft=self.n_fft, + hop_length=self.hop_size, + win_length=self.win_length, + window=window_fn(self.win_length, periodic=False).to(torch.float) if window_fn else None, + ) + + def parse(self, str_input: str) -> torch.tensor: + # TODO: Implement + pass + + def configure_optimizers(self): + self.optim_g = torch.optim.AdamW( + self.net_g.parameters(), + self._cfg.lr, + betas=self._cfg.betas, + eps=self._cfg.eps) + self.optim_d = torch.optim.AdamW( + self.net_d.parameters(), + self._cfg.lr, + betas=self._cfg.betas, + eps=self._cfg.eps) + + scheduler_g = torch.optim.lr_scheduler.ExponentialLR(self.optim_g, gamma=self._cfg.lr_decay) + scheduler_g_dict = { + 'scheduler': scheduler_g, + 'interval': 'step', + } + scheduler_d = torch.optim.lr_scheduler.ExponentialLR(self.optim_d, gamma=self._cfg.lr_decay) + scheduler_d_dict = { + 'scheduler': scheduler_d, + 'interval': 'step' + } + return [self.optim_g, self.optim_d], [scheduler_g_dict, scheduler_d_dict] + + def forward(self, batch, 
batch_idx):
+        with torch.no_grad():
+            (x, x_lengths, spec, spec_lengths, y, y_lengths) = batch
+
+            # run inference on a single example from the batch
+            x = x[:1]
+            x_lengths = x_lengths[:1]
+
+            y_hat, attn, mask, *_ = self.net_g.infer(x, x_lengths, max_len=1000)
+            y_hat_lengths = mask.sum([1, 2]).long() * self._cfg.hop_size
+
+            return y_hat, y_hat_lengths
+
+    def get_spec(self, audio):
+        with torch.cuda.amp.autocast(enabled=False):
+            spec = self.stft(audio)
+            if spec.dtype in [torch.cfloat, torch.cdouble]:
+                spec = torch.view_as_real(spec)
+            spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-9)
+        return spec
+
+    def training_step(self, batch, batch_idx):
+        (y, y_lengths, x, x_lengths) = batch
+
+        spec = self.get_spec(y)
+        spec_lengths = self.audio_to_melspec_precessor.get_seq_len(y_lengths)
+
+        with autocast(enabled=False):
+            y_hat, l_length, attn, ids_slice, x_mask, z_mask, \
+            (z, z_p, m_p, logs_p, m_q, logs_q) = self.net_g(x, x_lengths, spec, spec_lengths)
+            mel = spec_to_mel_torch(
+                spec,
+                self._cfg.filter_length,
+                self._cfg.n_mel_channels,
+                self._cfg.sample_rate,
+                self._cfg.mel_fmin,
+                self._cfg.mel_fmax
+            )
+            y_mel = slice_segments(mel, ids_slice, self._cfg.segment_size // self._cfg.hop_size)
+
+            y_hat_mel = modules.audio_to_mel_torch(
+                y_hat.squeeze(1),
+                self._cfg.filter_length,
+                self._cfg.n_mel_channels,
+                self._cfg.sample_rate,
+                self._cfg.hop_size,
+                self._cfg.preprocessor.n_window_size,
+                self._cfg.mel_fmin,
+                self._cfg.mel_fmax
+            )
+            y = torch.unsqueeze(y, 1)
+            y = slice_segments(y, ids_slice * self._cfg.hop_size, self._cfg.segment_size)  # slice
+            y_d_hat_r, y_d_hat_g, _, _ = self.net_d(y, y_hat.detach())
+            loss_disc, losses_disc_r, losses_disc_g = self.disc_loss(y_d_hat_r, y_d_hat_g)
+            loss_disc_all = loss_disc
+
+        # train discriminator
+        self.optim_d.zero_grad()
+        self.manual_backward(loss_disc_all)
+        clip_grad_value_(self.net_d.parameters(), None)
+        self.optim_d.step()
+
+        with autocast(enabled=True):
+            # Generator
+            y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = self.net_d(y, y_hat)
+            with autocast(enabled=False):
+                loss_dur = torch.sum(l_length.float())
+                loss_mel = F.l1_loss(y_mel, y_hat_mel) * self._cfg.c_mel
+                loss_kl = self.kl_loss(z_p=z_p, logs_q=logs_q, m_p=m_p, logs_p=logs_p, z_mask=z_mask) * self._cfg.c_kl
+                loss_fm = self.feat_matching_loss(fmap_r=fmap_r, fmap_g=fmap_g)
+                loss_gen, losses_gen = self.gen_loss(disc_outputs=y_d_hat_g)
+                loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl
+
+        # train generator
+        self.optim_g.zero_grad()
+        self.manual_backward(loss_gen_all)
+        clip_grad_value_(self.net_g.parameters(), None)
+        self.optim_g.step()
+
+        schedulers = self.lr_schedulers()
+        if schedulers is not None:
+            sch1, sch2 = schedulers
+            sch1.step()
+            sch2.step()
+
+        metrics = {
+            "loss_gen": loss_gen,
+            "loss_fm": loss_fm,
+            "loss_mel * c_mel": loss_mel,
+            "loss_dur": loss_dur,
+            "loss_kl * c_kl": loss_kl,
+            "loss_gen_all": loss_gen_all,
+            "loss_disc_all": loss_disc_all,
+        }
+
+        for i, v in enumerate(losses_gen):
+            metrics["loss_gen_i_{}".format(i)] = v
+
+        for i, v in enumerate(losses_disc_r):
+            metrics["loss_disc_r_{}".format(i)] = v
+
+        for i, v in enumerate(losses_disc_g):
+            metrics["loss_disc_g_{}".format(i)] = v
+
+        self.log_dict(metrics, on_step=True, sync_dist=True)
+
+    def validation_step(self, batch, batch_idx):
+        (y, y_lengths, x, x_lengths) = batch
+
+        y_hat, attn, mask, *_ = self.net_g.infer(x, x_lengths, max_len=1000)
+        y_hat = y_hat.squeeze()
+        y_hat_lengths = mask.sum([1, 2]).long() * self._cfg.train_ds.dataset.hop_length
+
+        mel, mel_lengths = 
self.audio_to_melspec_precessor(y, y_lengths) + y_hat_mel, y_hat_mel_lengths = self.audio_to_melspec_precessor(y_hat, y_hat_lengths) + + # plot audio once per epoch + if batch_idx == 0 and self.logger is not None and self.logger.experiment is not None: + self.logger.experiment.add_audio( + "val_wav_target", + y[0, : y_lengths[0]].data.cpu().numpy(), + self.global_step, + sample_rate=self.sample_rate, + ) + + self.logger.experiment.add_audio( + "val_wav_predicted", + y_hat[0, : y_hat_lengths[0]].data.cpu().numpy(), + self.global_step, + sample_rate=self.sample_rate, + ) + + self.logger.experiment.add_image( + "val_mel_target", + plot_spectrogram_to_numpy(mel[0, :, : mel_lengths[0]].cpu().numpy()), + self.global_step, + dataformats="HWC", + ) + + self.logger.experiment.add_image( + "val_mel_predicted", + plot_spectrogram_to_numpy(y_hat_mel[0, :, : y_hat_mel_lengths[0]].cpu().numpy()), + self.global_step, + dataformats="HWC", + ) + + @staticmethod + def _loader(cfg): + try: + # _ = cfg.model.train_ds.manifest_filepath + _ = cfg['dataset']['manifest_filepath'] + except omegaconf.errors.MissingMandatoryValue: + logging.warning("manifest_filepath was skipped. No dataset for this model.") + return None + + dataset = instantiate(cfg.dataset) + return torch.utils.data.DataLoader( # noqa + dataset=dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params, + ) + + + def setup_training_data(self, cfg): + self._train_dl = self._loader(cfg) + + def setup_validation_data(self, cfg): + self._validation_dl = self._loader(cfg) + + def setup_test_data(self, cfg): + """Omitted.""" + pass + + @classmethod + def list_available_models(cls) -> 'List[PretrainedModelInfo]': + list_of_models = [] + # TODO: List available models?? + return list_of_models + + def convert_text_to_waveform(self, *, tokens): + # TODO: Convert text to waveforms + pass + diff --git a/nemo/collections/tts/modules/monotonic_align/__init__.py b/nemo/collections/tts/modules/monotonic_align/__init__.py new file mode 100644 index 000000000000..9293c5af5d4a --- /dev/null +++ b/nemo/collections/tts/modules/monotonic_align/__init__.py @@ -0,0 +1,19 @@ +import numpy as np +import torch +from .core import maximum_path_c + + +def maximum_path(neg_cent, mask): + """ Cython optimized version. + neg_cent: [b, t_t, t_s] + mask: [b, t_t, t_s] + """ + device = neg_cent.device + dtype = neg_cent.dtype + neg_cent = neg_cent.data.cpu().numpy().astype(np.float32) + path = np.zeros(neg_cent.shape, dtype=np.int32) + + t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32) + t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32) + maximum_path_c(path, neg_cent, t_t_max, t_s_max) + return torch.from_numpy(path).to(device=device, dtype=dtype) diff --git a/nemo/collections/tts/modules/monotonic_align/core.c b/nemo/collections/tts/modules/monotonic_align/core.c new file mode 100644 index 000000000000..5631d20a9a00 --- /dev/null +++ b/nemo/collections/tts/modules/monotonic_align/core.c @@ -0,0 +1,21299 @@ +/* Generated by Cython 0.29.21 */ + +/* BEGIN: Cython Metadata +{ + "distutils": { + "name": "monotonic_align.core", + "sources": [ + "core.pyx" + ] + }, + "module_name": "monotonic_align.core" +} +END: Cython Metadata */ + +#define PY_SSIZE_T_CLEAN +#include "Python.h" +#ifndef Py_PYTHON_H + #error Python headers needed to compile C extensions, please install development version of Python. +#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) + #error Cython requires Python 2.6+ or Python 3.3+. 
+#else +#define CYTHON_ABI "0_29_21" +#define CYTHON_HEX_VERSION 0x001D15F0 +#define CYTHON_FUTURE_DIVISION 0 +#include +#ifndef offsetof + #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) +#endif +#if !defined(WIN32) && !defined(MS_WINDOWS) + #ifndef __stdcall + #define __stdcall + #endif + #ifndef __cdecl + #define __cdecl + #endif + #ifndef __fastcall + #define __fastcall + #endif +#endif +#ifndef DL_IMPORT + #define DL_IMPORT(t) t +#endif +#ifndef DL_EXPORT + #define DL_EXPORT(t) t +#endif +#define __PYX_COMMA , +#ifndef HAVE_LONG_LONG + #if PY_VERSION_HEX >= 0x02070000 + #define HAVE_LONG_LONG + #endif +#endif +#ifndef PY_LONG_LONG + #define PY_LONG_LONG LONG_LONG +#endif +#ifndef Py_HUGE_VAL + #define Py_HUGE_VAL HUGE_VAL +#endif +#ifdef PYPY_VERSION + #define CYTHON_COMPILING_IN_PYPY 1 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #undef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 0 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #if PY_VERSION_HEX < 0x03050000 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #undef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #undef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 1 + #undef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 0 + #undef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 0 + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 +#elif defined(PYSTON_VERSION) + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 1 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 +#else + #define CYTHON_COMPILING_IN_PYPY 0 + 
#define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 1 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) + #define CYTHON_USE_PYTYPE_LOOKUP 1 + #endif + #if PY_MAJOR_VERSION < 3 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #elif !defined(CYTHON_USE_PYLONG_INTERNALS) + #define CYTHON_USE_PYLONG_INTERNALS 1 + #endif + #ifndef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 1 + #endif + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #if PY_VERSION_HEX < 0x030300F0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #elif !defined(CYTHON_USE_UNICODE_WRITER) + #define CYTHON_USE_UNICODE_WRITER 1 + #endif + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #ifndef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 1 + #endif + #ifndef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 1 + #endif + #ifndef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) + #endif + #ifndef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) + #endif + #ifndef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) + #endif + #ifndef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) + #endif +#endif +#if !defined(CYTHON_FAST_PYCCALL) +#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) +#endif +#if CYTHON_USE_PYLONG_INTERNALS + #include "longintrepr.h" + #undef SHIFT + #undef BASE + #undef MASK + #ifdef SIZEOF_VOID_P + enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; + #endif +#endif +#ifndef __has_attribute + #define __has_attribute(x) 0 +#endif +#ifndef __has_cpp_attribute + #define __has_cpp_attribute(x) 0 +#endif +#ifndef CYTHON_RESTRICT + #if defined(__GNUC__) + #define CYTHON_RESTRICT __restrict__ + #elif defined(_MSC_VER) && _MSC_VER >= 1400 + #define CYTHON_RESTRICT __restrict + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_RESTRICT restrict + #else + #define CYTHON_RESTRICT + #endif +#endif +#ifndef CYTHON_UNUSED +# if defined(__GNUC__) +# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +#endif +#ifndef CYTHON_MAYBE_UNUSED_VAR +# if defined(__cplusplus) + template void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } +# else +# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) +# endif +#endif +#ifndef CYTHON_NCP_UNUSED +# if CYTHON_COMPILING_IN_CPYTHON +# define CYTHON_NCP_UNUSED +# else +# define CYTHON_NCP_UNUSED CYTHON_UNUSED +# endif +#endif +#define 
__Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) +#ifdef _MSC_VER + #ifndef _MSC_STDINT_H_ + #if _MSC_VER < 1300 + typedef unsigned char uint8_t; + typedef unsigned int uint32_t; + #else + typedef unsigned __int8 uint8_t; + typedef unsigned __int32 uint32_t; + #endif + #endif +#else + #include +#endif +#ifndef CYTHON_FALLTHROUGH + #if defined(__cplusplus) && __cplusplus >= 201103L + #if __has_cpp_attribute(fallthrough) + #define CYTHON_FALLTHROUGH [[fallthrough]] + #elif __has_cpp_attribute(clang::fallthrough) + #define CYTHON_FALLTHROUGH [[clang::fallthrough]] + #elif __has_cpp_attribute(gnu::fallthrough) + #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] + #endif + #endif + #ifndef CYTHON_FALLTHROUGH + #if __has_attribute(fallthrough) + #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) + #else + #define CYTHON_FALLTHROUGH + #endif + #endif + #if defined(__clang__ ) && defined(__apple_build_version__) + #if __apple_build_version__ < 7000000 + #undef CYTHON_FALLTHROUGH + #define CYTHON_FALLTHROUGH + #endif + #endif +#endif + +#ifndef CYTHON_INLINE + #if defined(__clang__) + #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) + #elif defined(__GNUC__) + #define CYTHON_INLINE __inline__ + #elif defined(_MSC_VER) + #define CYTHON_INLINE __inline + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_INLINE inline + #else + #define CYTHON_INLINE + #endif +#endif + +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) + #define Py_OptimizeFlag 0 +#endif +#define __PYX_BUILD_PY_SSIZE_T "n" +#define CYTHON_FORMAT_SSIZE_T "z" +#if PY_MAJOR_VERSION < 3 + #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) + #define __Pyx_DefaultClassType PyClass_Type +#else + #define __Pyx_BUILTIN_MODULE_NAME "builtins" +#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) +#else + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) +#endif + #define __Pyx_DefaultClassType PyType_Type +#endif +#ifndef Py_TPFLAGS_CHECKTYPES + #define Py_TPFLAGS_CHECKTYPES 0 +#endif +#ifndef Py_TPFLAGS_HAVE_INDEX + #define Py_TPFLAGS_HAVE_INDEX 0 +#endif +#ifndef Py_TPFLAGS_HAVE_NEWBUFFER + #define Py_TPFLAGS_HAVE_NEWBUFFER 0 +#endif +#ifndef Py_TPFLAGS_HAVE_FINALIZE + #define Py_TPFLAGS_HAVE_FINALIZE 0 +#endif +#ifndef METH_STACKLESS + #define METH_STACKLESS 0 +#endif +#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) + #ifndef METH_FASTCALL + #define METH_FASTCALL 0x80 + #endif + typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); + typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, + Py_ssize_t nargs, PyObject *kwnames); +#else + #define __Pyx_PyCFunctionFast _PyCFunctionFast + #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords +#endif +#if CYTHON_FAST_PYCCALL +#define __Pyx_PyFastCFunction_Check(func)\ + ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | 
METH_STACKLESS))))) +#else +#define __Pyx_PyFastCFunction_Check(func) 0 +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) + #define PyObject_Malloc(s) PyMem_Malloc(s) + #define PyObject_Free(p) PyMem_Free(p) + #define PyObject_Realloc(p) PyMem_Realloc(p) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 + #define PyMem_RawMalloc(n) PyMem_Malloc(n) + #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) + #define PyMem_RawFree(p) PyMem_Free(p) +#endif +#if CYTHON_COMPILING_IN_PYSTON + #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) +#else + #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) +#endif +#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#elif PY_VERSION_HEX >= 0x03060000 + #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() +#elif PY_VERSION_HEX >= 0x03000000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#else + #define __Pyx_PyThreadState_Current _PyThreadState_Current +#endif +#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) +#include "pythread.h" +#define Py_tss_NEEDS_INIT 0 +typedef int Py_tss_t; +static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { + *key = PyThread_create_key(); + return 0; +} +static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { + Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); + *key = Py_tss_NEEDS_INIT; + return key; +} +static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { + PyObject_Free(key); +} +static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { + return *key != Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { + PyThread_delete_key(*key); + *key = Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { + return PyThread_set_key_value(*key, value); +} +static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { + return PyThread_get_key_value(*key); +} +#endif +#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) +#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) +#else +#define __Pyx_PyDict_NewPresized(n) PyDict_New() +#endif +#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION + #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) +#else + #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS +#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) +#else +#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) +#endif +#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) + #define CYTHON_PEP393_ENABLED 1 + #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ + 0 : _PyUnicode_Ready((PyObject *)(op))) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) + #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) + #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) + #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) + #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) + #else + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) + #endif +#else + #define CYTHON_PEP393_ENABLED 0 + #define PyUnicode_1BYTE_KIND 1 + #define PyUnicode_2BYTE_KIND 2 + #define PyUnicode_4BYTE_KIND 4 + #define __Pyx_PyUnicode_READY(op) (0) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) + #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) + #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) + #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) +#endif +#if CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) +#else + #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ + PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) + #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) + #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) + #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) +#endif +#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) +#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) +#else + #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) +#endif +#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) + #define PyObject_ASCII(o) PyObject_Repr(o) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBaseString_Type PyUnicode_Type + #define PyStringObject PyUnicodeObject + #define PyString_Type PyUnicode_Type + #define PyString_Check PyUnicode_Check + #define PyString_CheckExact PyUnicode_CheckExact +#ifndef PyObject_Unicode + #define PyObject_Unicode PyObject_Str +#endif +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) + #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) +#else + #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) + #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) +#endif +#ifndef PySet_CheckExact + #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) +#endif +#if PY_VERSION_HEX >= 0x030900A4 + #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) + #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) +#else + #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) + #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) +#endif +#if CYTHON_ASSUME_SAFE_MACROS + #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) +#else + #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyIntObject PyLongObject + #define PyInt_Type PyLong_Type + #define PyInt_Check(op) PyLong_Check(op) + #define PyInt_CheckExact(op) PyLong_CheckExact(op) + #define PyInt_FromString PyLong_FromString + #define PyInt_FromUnicode PyLong_FromUnicode + #define PyInt_FromLong PyLong_FromLong + #define PyInt_FromSize_t PyLong_FromSize_t + #define PyInt_FromSsize_t PyLong_FromSsize_t + #define PyInt_AsLong PyLong_AsLong + #define PyInt_AS_LONG PyLong_AS_LONG + #define PyInt_AsSsize_t PyLong_AsSsize_t + #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask + #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask + #define PyNumber_Int PyNumber_Long +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBoolObject PyLongObject +#endif +#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY + #ifndef PyUnicode_InternFromString + #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) + #endif +#endif +#if PY_VERSION_HEX < 0x030200A4 + typedef long Py_hash_t; + #define __Pyx_PyInt_FromHash_t PyInt_FromLong + #define __Pyx_PyInt_AsHash_t PyInt_AsLong +#else + #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t + #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func)) +#else + #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) +#endif +#if CYTHON_USE_ASYNC_SLOTS + #if PY_VERSION_HEX >= 0x030500B1 + #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods + #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) + #else + #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) + #endif +#else + #define __Pyx_PyType_AsAsync(obj) NULL +#endif +#ifndef __Pyx_PyAsyncMethodsStruct + typedef struct { + unaryfunc am_await; + unaryfunc am_aiter; + unaryfunc am_anext; + } __Pyx_PyAsyncMethodsStruct; +#endif + +#if defined(WIN32) || defined(MS_WINDOWS) + #define _USE_MATH_DEFINES +#endif +#include +#ifdef NAN +#define __PYX_NAN() ((float) NAN) +#else +static CYTHON_INLINE float __PYX_NAN() { + float value; + memset(&value, 0xFF, sizeof(value)); + return value; +} +#endif +#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) +#define __Pyx_truncl trunc +#else +#define __Pyx_truncl truncl +#endif + +#define __PYX_MARK_ERR_POS(f_index, lineno) \ + { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } +#define __PYX_ERR(f_index, lineno, Ln_error) \ + { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } + +#ifndef __PYX_EXTERN_C + #ifdef __cplusplus + #define __PYX_EXTERN_C extern "C" + #else + #define __PYX_EXTERN_C extern + #endif +#endif + +#define __PYX_HAVE__monotonic_align__core +#define __PYX_HAVE_API__monotonic_align__core +/* Early includes */ +#include "pythread.h" +#include +#include +#include +#include "pystate.h" +#ifdef _OPENMP +#include +#endif /* _OPENMP */ + +#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) +#define CYTHON_WITHOUT_ASSERTIONS +#endif + +typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; + const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; + +#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) +#define __PYX_DEFAULT_STRING_ENCODING "" +#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString +#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#define __Pyx_uchar_cast(c) ((unsigned char)c) +#define __Pyx_long_cast(x) ((long)x) +#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ + (sizeof(type) < sizeof(Py_ssize_t)) ||\ + (sizeof(type) > sizeof(Py_ssize_t) &&\ + likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX) &&\ + (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ + v == (type)PY_SSIZE_T_MIN))) ||\ + (sizeof(type) == sizeof(Py_ssize_t) &&\ + (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX))) ) +static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { + return (size_t) i < (size_t) limit; +} +#if defined (__cplusplus) && __cplusplus >= 201103L + #include + #define __Pyx_sst_abs(value) std::abs(value) +#elif SIZEOF_INT >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) abs(value) +#elif SIZEOF_LONG >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) labs(value) +#elif defined (_MSC_VER) + #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) +#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define __Pyx_sst_abs(value) llabs(value) +#elif defined (__GNUC__) + 
#define __Pyx_sst_abs(value) __builtin_llabs(value) +#else + #define __Pyx_sst_abs(value) ((value<0) ? -value : value) +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); +#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) +#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) +#define __Pyx_PyBytes_FromString PyBytes_FromString +#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); +#if PY_MAJOR_VERSION < 3 + #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#else + #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize +#endif +#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) +#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) +#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) +#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) +#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) +static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { + const Py_UNICODE *u_end = u; + while (*u_end++) ; + return (size_t)(u_end - u - 1); +} +#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) +#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode +#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode +#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) +#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); +static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); +#define __Pyx_PySequence_Tuple(obj)\ + (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); +#if CYTHON_ASSUME_SAFE_MACROS +#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? 
PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) +#else +#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) +#endif +#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) +#else +#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) +#endif +#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII +static int __Pyx_sys_getdefaultencoding_not_ascii; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + PyObject* ascii_chars_u = NULL; + PyObject* ascii_chars_b = NULL; + const char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + if (strcmp(default_encoding_c, "ascii") == 0) { + __Pyx_sys_getdefaultencoding_not_ascii = 0; + } else { + char ascii_chars[128]; + int c; + for (c = 0; c < 128; c++) { + ascii_chars[c] = c; + } + __Pyx_sys_getdefaultencoding_not_ascii = 1; + ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); + if (!ascii_chars_u) goto bad; + ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); + if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { + PyErr_Format( + PyExc_ValueError, + "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", + default_encoding_c); + goto bad; + } + Py_DECREF(ascii_chars_u); + Py_DECREF(ascii_chars_b); + } + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + Py_XDECREF(ascii_chars_u); + Py_XDECREF(ascii_chars_b); + return -1; +} +#endif +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) +#else +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +static char* __PYX_DEFAULT_STRING_ENCODING; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); + if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; + strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + return -1; +} +#endif +#endif + + +/* Test for GCC > 2.95 */ +#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) + #define likely(x) __builtin_expect(!!(x), 1) + #define unlikely(x) __builtin_expect(!!(x), 0) +#else /* !__GNUC__ or GCC < 2.95 */ + #define likely(x) (x) + #define unlikely(x) (x) +#endif /* __GNUC__ */ 
+static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } + +static PyObject *__pyx_m = NULL; +static PyObject *__pyx_d; +static PyObject *__pyx_b; +static PyObject *__pyx_cython_runtime = NULL; +static PyObject *__pyx_empty_tuple; +static PyObject *__pyx_empty_bytes; +static PyObject *__pyx_empty_unicode; +static int __pyx_lineno; +static int __pyx_clineno = 0; +static const char * __pyx_cfilenm= __FILE__; +static const char *__pyx_filename; + + +static const char *__pyx_f[] = { + "core.pyx", + "stringsource", +}; +/* NoFastGil.proto */ +#define __Pyx_PyGILState_Ensure PyGILState_Ensure +#define __Pyx_PyGILState_Release PyGILState_Release +#define __Pyx_FastGIL_Remember() +#define __Pyx_FastGIL_Forget() +#define __Pyx_FastGilFuncInit() + +/* MemviewSliceStruct.proto */ +struct __pyx_memoryview_obj; +typedef struct { + struct __pyx_memoryview_obj *memview; + char *data; + Py_ssize_t shape[8]; + Py_ssize_t strides[8]; + Py_ssize_t suboffsets[8]; +} __Pyx_memviewslice; +#define __Pyx_MemoryView_Len(m) (m.shape[0]) + +/* Atomics.proto */ +#include +#ifndef CYTHON_ATOMICS + #define CYTHON_ATOMICS 1 +#endif +#define __pyx_atomic_int_type int +#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\ + (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\ + !defined(__i386__) + #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1) + #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1) + #ifdef __PYX_DEBUG_ATOMICS + #warning "Using GNU atomics" + #endif +#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0 + #include + #undef __pyx_atomic_int_type + #define __pyx_atomic_int_type LONG + #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value) + #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value) + #ifdef __PYX_DEBUG_ATOMICS + #pragma message ("Using MSVC atomics") + #endif +#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0 + #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value) + #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value) + #ifdef __PYX_DEBUG_ATOMICS + #warning "Using Intel atomics" + #endif +#else + #undef CYTHON_ATOMICS + #define CYTHON_ATOMICS 0 + #ifdef __PYX_DEBUG_ATOMICS + #warning "Not using atomics" + #endif +#endif +typedef volatile __pyx_atomic_int_type __pyx_atomic_int; +#if CYTHON_ATOMICS + #define __pyx_add_acquisition_count(memview)\ + __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) + #define __pyx_sub_acquisition_count(memview)\ + __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) +#else + #define __pyx_add_acquisition_count(memview)\ + __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) + #define __pyx_sub_acquisition_count(memview)\ + __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) +#endif + +/* ForceInitThreads.proto */ +#ifndef __PYX_FORCE_INIT_THREADS + #define __PYX_FORCE_INIT_THREADS 0 +#endif + +/* BufferFormatStructs.proto */ +#define IS_UNSIGNED(type) (((type) -1) > 0) +struct __Pyx_StructField_; +#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) +typedef struct { + const char* name; + struct __Pyx_StructField_* fields; + size_t size; + size_t arraysize[8]; + int ndim; + char typegroup; + char is_unsigned; + int flags; +} __Pyx_TypeInfo; +typedef struct __Pyx_StructField_ { + __Pyx_TypeInfo* type; + const char* name; + size_t offset; +} 
__Pyx_StructField; +typedef struct { + __Pyx_StructField* field; + size_t parent_offset; +} __Pyx_BufFmt_StackElem; +typedef struct { + __Pyx_StructField root; + __Pyx_BufFmt_StackElem* head; + size_t fmt_offset; + size_t new_count, enc_count; + size_t struct_alignment; + int is_complex; + char enc_type; + char new_packmode; + char enc_packmode; + char is_valid_array; +} __Pyx_BufFmt_Context; + + +/*--- Type declarations ---*/ +struct __pyx_array_obj; +struct __pyx_MemviewEnum_obj; +struct __pyx_memoryview_obj; +struct __pyx_memoryviewslice_obj; +struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each; + +/* "monotonic_align/core.pyx":7 + * @cython.boundscheck(False) + * @cython.wraparound(False) + * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< + * cdef int x + * cdef int y + */ +struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each { + int __pyx_n; + float max_neg_val; +}; + +/* "View.MemoryView":105 + * + * @cname("__pyx_array") + * cdef class array: # <<<<<<<<<<<<<< + * + * cdef: + */ +struct __pyx_array_obj { + PyObject_HEAD + struct __pyx_vtabstruct_array *__pyx_vtab; + char *data; + Py_ssize_t len; + char *format; + int ndim; + Py_ssize_t *_shape; + Py_ssize_t *_strides; + Py_ssize_t itemsize; + PyObject *mode; + PyObject *_format; + void (*callback_free_data)(void *); + int free_data; + int dtype_is_object; +}; + + +/* "View.MemoryView":279 + * + * @cname('__pyx_MemviewEnum') + * cdef class Enum(object): # <<<<<<<<<<<<<< + * cdef object name + * def __init__(self, name): + */ +struct __pyx_MemviewEnum_obj { + PyObject_HEAD + PyObject *name; +}; + + +/* "View.MemoryView":330 + * + * @cname('__pyx_memoryview') + * cdef class memoryview(object): # <<<<<<<<<<<<<< + * + * cdef object obj + */ +struct __pyx_memoryview_obj { + PyObject_HEAD + struct __pyx_vtabstruct_memoryview *__pyx_vtab; + PyObject *obj; + PyObject *_size; + PyObject *_array_interface; + PyThread_type_lock lock; + __pyx_atomic_int acquisition_count[2]; + __pyx_atomic_int *acquisition_count_aligned_p; + Py_buffer view; + int flags; + int dtype_is_object; + __Pyx_TypeInfo *typeinfo; +}; + + +/* "View.MemoryView":965 + * + * @cname('__pyx_memoryviewslice') + * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< + * "Internal class for passing memoryview slices to Python" + * + */ +struct __pyx_memoryviewslice_obj { + struct __pyx_memoryview_obj __pyx_base; + __Pyx_memviewslice from_slice; + PyObject *from_object; + PyObject *(*to_object_func)(char *); + int (*to_dtype_func)(char *, PyObject *); +}; + + + +/* "View.MemoryView":105 + * + * @cname("__pyx_array") + * cdef class array: # <<<<<<<<<<<<<< + * + * cdef: + */ + +struct __pyx_vtabstruct_array { + PyObject *(*get_memview)(struct __pyx_array_obj *); +}; +static struct __pyx_vtabstruct_array *__pyx_vtabptr_array; + + +/* "View.MemoryView":330 + * + * @cname('__pyx_memoryview') + * cdef class memoryview(object): # <<<<<<<<<<<<<< + * + * cdef object obj + */ + +struct __pyx_vtabstruct_memoryview { + char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); + PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); + PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); + PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); + PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); + PyObject 
*(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); + PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); +}; +static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; + + +/* "View.MemoryView":965 + * + * @cname('__pyx_memoryviewslice') + * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< + * "Internal class for passing memoryview slices to Python" + * + */ + +struct __pyx_vtabstruct__memoryviewslice { + struct __pyx_vtabstruct_memoryview __pyx_base; +}; +static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; + +/* --- Runtime support code (head) --- */ +/* Refnanny.proto */ +#ifndef CYTHON_REFNANNY + #define CYTHON_REFNANNY 0 +#endif +#if CYTHON_REFNANNY + typedef struct { + void (*INCREF)(void*, PyObject*, int); + void (*DECREF)(void*, PyObject*, int); + void (*GOTREF)(void*, PyObject*, int); + void (*GIVEREF)(void*, PyObject*, int); + void* (*SetupContext)(const char*, int, const char*); + void (*FinishContext)(void**); + } __Pyx_RefNannyAPIStruct; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); + #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; +#ifdef WITH_THREAD + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + if (acquire_gil) {\ + PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + PyGILState_Release(__pyx_gilstate_save);\ + } else {\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + } +#else + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) +#endif + #define __Pyx_RefNannyFinishContext()\ + __Pyx_RefNanny->FinishContext(&__pyx_refnanny) + #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) + #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) + #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) + #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) +#else + #define __Pyx_RefNannyDeclarations + #define __Pyx_RefNannySetupContext(name, acquire_gil) + #define __Pyx_RefNannyFinishContext() + #define __Pyx_INCREF(r) Py_INCREF(r) + #define __Pyx_DECREF(r) Py_DECREF(r) + #define __Pyx_GOTREF(r) + #define __Pyx_GIVEREF(r) + #define __Pyx_XINCREF(r) Py_XINCREF(r) + #define __Pyx_XDECREF(r) Py_XDECREF(r) + #define __Pyx_XGOTREF(r) + #define __Pyx_XGIVEREF(r) +#endif +#define __Pyx_XDECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_XDECREF(tmp);\ + } while (0) +#define __Pyx_DECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_DECREF(tmp);\ + } while (0) +#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) +#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) + +/* PyObjectGetAttrStr.proto */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* 
__Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) +#endif + +/* GetBuiltinName.proto */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name); + +/* MemviewSliceInit.proto */ +#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d +#define __Pyx_MEMVIEW_DIRECT 1 +#define __Pyx_MEMVIEW_PTR 2 +#define __Pyx_MEMVIEW_FULL 4 +#define __Pyx_MEMVIEW_CONTIG 8 +#define __Pyx_MEMVIEW_STRIDED 16 +#define __Pyx_MEMVIEW_FOLLOW 32 +#define __Pyx_IS_C_CONTIG 1 +#define __Pyx_IS_F_CONTIG 2 +static int __Pyx_init_memviewslice( + struct __pyx_memoryview_obj *memview, + int ndim, + __Pyx_memviewslice *memviewslice, + int memview_is_new_reference); +static CYTHON_INLINE int __pyx_add_acquisition_count_locked( + __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); +static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( + __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); +#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) +#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) +#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) +#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) +static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); +static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); + +/* RaiseArgTupleInvalid.proto */ +static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, + Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); + +/* RaiseDoubleKeywords.proto */ +static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); + +/* ParseKeywords.proto */ +static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ + PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ + const char* function_name); + +/* None.proto */ +static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); + +/* ArgTypeTest.proto */ +#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ + ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 
1 :\ + __Pyx__ArgTypeTest(obj, type, name, exact)) +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); + +/* PyObjectCall.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); +#else +#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) +#endif + +/* PyThreadStateGet.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; +#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; +#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type +#else +#define __Pyx_PyThreadState_declare +#define __Pyx_PyThreadState_assign +#define __Pyx_PyErr_Occurred() PyErr_Occurred() +#endif + +/* PyErrFetchRestore.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) +#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) +#else +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#endif +#else +#define __Pyx_PyErr_Clear() PyErr_Clear() +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) +#endif + +/* RaiseException.proto */ +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); + +/* PyCFunctionFastCall.proto */ +#if CYTHON_FAST_PYCCALL +static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); +#else +#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) +#endif + +/* PyFunctionFastCall.proto */ +#if CYTHON_FAST_PYCALL +#define __Pyx_PyFunction_FastCall(func, args, nargs)\ + __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) +#if 1 || PY_VERSION_HEX < 0x030600B1 +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); +#else +#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) +#endif +#define __Pyx_BUILD_ASSERT_EXPR(cond)\ + (sizeof(char [1 - 2*!(cond)]) - 1) +#ifndef Py_MEMBER_SIZE +#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) +#endif + static size_t __pyx_pyframe_localsplus_offset = 0; + #include "frameobject.h" + #define __Pxy_PyFrame_Initialize_Offsets()\ + 
((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ + (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) + #define __Pyx_PyFrame_GetLocalsplus(frame)\ + (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) +#endif + +/* PyObjectCall2Args.proto */ +static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); + +/* PyObjectCallMethO.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); +#endif + +/* PyObjectCallOneArg.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); + +/* IncludeStringH.proto */ +#include <string.h> + +/* BytesEquals.proto */ +static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); + +/* UnicodeEquals.proto */ +static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); + +/* StrEquals.proto */ +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals +#else +#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals +#endif + +/* None.proto */ +static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t); + +/* UnaryNegOverflows.proto */ +#define UNARY_NEG_WOULD_OVERFLOW(x)\ + (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) + +static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ +static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/ +/* GetAttr.proto */ +static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); + +/* GetItemInt.proto */ +#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ + (is_list ?
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ + __Pyx_GetItemInt_Generic(o, to_py_func(i)))) +#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ + (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, + int wraparound, int boundscheck); +#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ + (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, + int wraparound, int boundscheck); +static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, + int is_list, int wraparound, int boundscheck); + +/* ObjectGetItem.proto */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); +#else +#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) +#endif + +/* decode_c_string_utf16.proto */ +static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) { + int byteorder = 0; + return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); +} +static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) { + int byteorder = -1; + return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); +} +static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) { + int byteorder = 1; + return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); +} + +/* decode_c_string.proto */ +static CYTHON_INLINE PyObject* __Pyx_decode_c_string( + const char* cstring, Py_ssize_t start, Py_ssize_t stop, + const char* encoding, const char* errors, + PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); + +/* PyErrExceptionMatches.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) +static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); +#else +#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) +#endif + +/* GetAttr3.proto */ +static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); + +/* PyDictVersioning.proto */ +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) +#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ + (version_var) = __PYX_GET_DICT_VERSION(dict);\ + (cache_var) = (value); +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ + static PY_UINT64_T __pyx_dict_version = 0;\ + static PyObject *__pyx_dict_cached_value = NULL;\ + if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ + (VAR) = __pyx_dict_cached_value;\ + } else {\ + (VAR) = __pyx_dict_cached_value = (LOOKUP);\ + __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ + }\ +} +static 
CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); +#else +#define __PYX_GET_DICT_VERSION(dict) (0) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); +#endif + +/* GetModuleGlobalName.proto */ +#if CYTHON_USE_DICT_VERSIONS +#define __Pyx_GetModuleGlobalName(var, name) {\ + static PY_UINT64_T __pyx_dict_version = 0;\ + static PyObject *__pyx_dict_cached_value = NULL;\ + (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ + (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ + __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ +} +#define __Pyx_GetModuleGlobalNameUncached(var, name) {\ + PY_UINT64_T __pyx_dict_version;\ + PyObject *__pyx_dict_cached_value;\ + (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ +} +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); +#else +#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) +#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); +#endif + +/* RaiseTooManyValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); + +/* RaiseNeedMoreValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); + +/* RaiseNoneIterError.proto */ +static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); + +/* ExtTypeTest.proto */ +static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); + +/* GetTopmostException.proto */ +#if CYTHON_USE_EXC_INFO_STACK +static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); +#endif + +/* SaveResetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +#else +#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) +#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) +#endif + +/* GetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); +#endif + +/* SwapException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#else +static CYTHON_INLINE void 
__Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); +#endif + +/* Import.proto */ +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); + +/* FastTypeChecks.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); +#else +#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) +#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) +#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) +#endif +#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) + +static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ +/* ListCompAppend.proto */ +#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS +static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { + PyListObject* L = (PyListObject*) list; + Py_ssize_t len = Py_SIZE(list); + if (likely(L->allocated > len)) { + Py_INCREF(x); + PyList_SET_ITEM(list, len, x); + __Pyx_SET_SIZE(list, len + 1); + return 0; + } + return PyList_Append(list, x); +} +#else +#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) +#endif + +/* PyIntBinop.proto */ +#if !CYTHON_COMPILING_IN_PYPY +static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); +#else +#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\ + (inplace ? 
PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) +#endif + +/* ListExtend.proto */ +static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { +#if CYTHON_COMPILING_IN_CPYTHON + PyObject* none = _PyList_Extend((PyListObject*)L, v); + if (unlikely(!none)) + return -1; + Py_DECREF(none); + return 0; +#else + return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); +#endif +} + +/* ListAppend.proto */ +#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS +static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { + PyListObject* L = (PyListObject*) list; + Py_ssize_t len = Py_SIZE(list); + if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { + Py_INCREF(x); + PyList_SET_ITEM(list, len, x); + __Pyx_SET_SIZE(list, len + 1); + return 0; + } + return PyList_Append(list, x); +} +#else +#define __Pyx_PyList_Append(L,x) PyList_Append(L,x) +#endif + +/* None.proto */ +static CYTHON_INLINE long __Pyx_div_long(long, long); + +/* ImportFrom.proto */ +static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); + +/* HasAttr.proto */ +static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); + +/* PyObject_GenericGetAttrNoDict.proto */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr +#endif + +/* PyObject_GenericGetAttr.proto */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr +#endif + +/* SetVTable.proto */ +static int __Pyx_SetVtable(PyObject *dict, void *vtable); + +/* PyObjectGetAttrStrNoError.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name); + +/* SetupReduce.proto */ +static int __Pyx_setup_reduce(PyObject* type_obj); + +/* CLineInTraceback.proto */ +#ifdef CYTHON_CLINE_IN_TRACEBACK +#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0) +#else +static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); +#endif + +/* CodeObjectCache.proto */ +typedef struct { + PyCodeObject* code_object; + int code_line; +} __Pyx_CodeObjectCacheEntry; +struct __Pyx_CodeObjectCache { + int count; + int max_count; + __Pyx_CodeObjectCacheEntry* entries; +}; +static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); +static PyCodeObject *__pyx_find_code_object(int code_line); +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); + +/* AddTraceback.proto */ +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename); + +#if PY_MAJOR_VERSION < 3 + static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); + static void __Pyx_ReleaseBuffer(Py_buffer *view); +#else + #define __Pyx_GetBuffer PyObject_GetBuffer + #define __Pyx_ReleaseBuffer PyBuffer_Release +#endif + + +/* BufferStructDeclare.proto */ +typedef struct { + Py_ssize_t shape, strides, suboffsets; +} __Pyx_Buf_DimInfo; +typedef struct { + size_t refcount; + Py_buffer pybuffer; +} __Pyx_Buffer; +typedef struct { + __Pyx_Buffer *rcbuffer; + char *data; + __Pyx_Buf_DimInfo diminfo[8]; +} __Pyx_LocalBuf_ND; + +/* MemviewSliceIsContig.proto */ +static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); + +/* OverlappingSlices.proto */ +static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, + __Pyx_memviewslice *slice2, + int ndim, size_t itemsize); + +/* Capsule.proto */ +static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); + +/* IsLittleEndian.proto */ +static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); + +/* BufferFormatCheck.proto */ +static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); +static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, + __Pyx_BufFmt_StackElem* stack, + __Pyx_TypeInfo* type); + +/* TypeInfoCompare.proto */ +static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); + +/* MemviewSliceValidateAndInit.proto */ +static int __Pyx_ValidateAndInit_memviewslice( + int *axes_specs, + int c_or_f_flag, + int buf_flags, + int ndim, + __Pyx_TypeInfo *dtype, + __Pyx_BufFmt_StackElem stack[], + __Pyx_memviewslice *memviewslice, + PyObject *original_obj); + +/* ObjectToMemviewSlice.proto */ +static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(PyObject *, int writable_flag); + +/* ObjectToMemviewSlice.proto */ +static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *, int writable_flag); + +/* ObjectToMemviewSlice.proto */ +static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *, int writable_flag); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); + +/* MemviewSliceCopyTemplate.proto */ +static __Pyx_memviewslice +__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, + const char *mode, int ndim, + size_t sizeof_dtype, int contig_flag, + int dtype_is_object); + +/* CIntFromPy.proto */ +static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); + +/* CIntFromPy.proto */ +static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); + +/* CIntFromPy.proto */ +static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject 
*); + +/* CheckBinaryVersion.proto */ +static int __Pyx_check_binary_version(void); + +/* InitStrings.proto */ +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); + +static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ +static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ +static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ +static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ +static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ +static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ +static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ +static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ +static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ +static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ + +/* Module declarations from 'cython.view' */ + +/* Module declarations from 'cython' */ + +/* Module declarations from 'monotonic_align.core' */ +static PyTypeObject *__pyx_array_type = 0; +static PyTypeObject *__pyx_MemviewEnum_type = 0; +static PyTypeObject *__pyx_memoryview_type = 0; +static PyTypeObject *__pyx_memoryviewslice_type = 0; +static PyObject *generic = 0; +static PyObject *strided = 0; +static PyObject *indirect = 0; +static PyObject *contiguous = 0; +static PyObject *indirect_contiguous = 0; +static int __pyx_memoryview_thread_locks_used; +static PyThread_type_lock __pyx_memoryview_thread_locks[8]; +static void __pyx_f_15monotonic_align_4core_maximum_path_each(__Pyx_memviewslice, __Pyx_memviewslice, int, int, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each *__pyx_optional_args); /*proto*/ +static void __pyx_f_15monotonic_align_4core_maximum_path_c(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/ +static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ +static void *__pyx_align_pointer(void *, size_t); /*proto*/ +static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ +static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ +static PyObject *_unellipsify(PyObject *, int); /*proto*/ +static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ +static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ +static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ +static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ +static int __pyx_memslice_transpose(__Pyx_memviewslice *); 
/*proto*/ +static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ +static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ +static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ +static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ +static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ +static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ +static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ +static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ +static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ +static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ +static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ +static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ +static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ +static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ +static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ +static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ +static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ +static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ +static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ +static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ +static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ +static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ +static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/ +static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, IS_UNSIGNED(int) ? 
'U' : 'I', IS_UNSIGNED(int), 0 }; +static __Pyx_TypeInfo __Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 }; +#define __Pyx_MODULE_NAME "monotonic_align.core" +extern int __pyx_module_is_main_monotonic_align__core; +int __pyx_module_is_main_monotonic_align__core = 0; + +/* Implementation of 'monotonic_align.core' */ +static PyObject *__pyx_builtin_range; +static PyObject *__pyx_builtin_ValueError; +static PyObject *__pyx_builtin_MemoryError; +static PyObject *__pyx_builtin_enumerate; +static PyObject *__pyx_builtin_TypeError; +static PyObject *__pyx_builtin_Ellipsis; +static PyObject *__pyx_builtin_id; +static PyObject *__pyx_builtin_IndexError; +static const char __pyx_k_O[] = "O"; +static const char __pyx_k_c[] = "c"; +static const char __pyx_k_id[] = "id"; +static const char __pyx_k_new[] = "__new__"; +static const char __pyx_k_obj[] = "obj"; +static const char __pyx_k_base[] = "base"; +static const char __pyx_k_dict[] = "__dict__"; +static const char __pyx_k_main[] = "__main__"; +static const char __pyx_k_mode[] = "mode"; +static const char __pyx_k_name[] = "name"; +static const char __pyx_k_ndim[] = "ndim"; +static const char __pyx_k_pack[] = "pack"; +static const char __pyx_k_size[] = "size"; +static const char __pyx_k_step[] = "step"; +static const char __pyx_k_stop[] = "stop"; +static const char __pyx_k_t_xs[] = "t_xs"; +static const char __pyx_k_t_ys[] = "t_ys"; +static const char __pyx_k_test[] = "__test__"; +static const char __pyx_k_ASCII[] = "ASCII"; +static const char __pyx_k_class[] = "__class__"; +static const char __pyx_k_error[] = "error"; +static const char __pyx_k_flags[] = "flags"; +static const char __pyx_k_paths[] = "paths"; +static const char __pyx_k_range[] = "range"; +static const char __pyx_k_shape[] = "shape"; +static const char __pyx_k_start[] = "start"; +static const char __pyx_k_encode[] = "encode"; +static const char __pyx_k_format[] = "format"; +static const char __pyx_k_import[] = "__import__"; +static const char __pyx_k_name_2[] = "__name__"; +static const char __pyx_k_pickle[] = "pickle"; +static const char __pyx_k_reduce[] = "__reduce__"; +static const char __pyx_k_struct[] = "struct"; +static const char __pyx_k_unpack[] = "unpack"; +static const char __pyx_k_update[] = "update"; +static const char __pyx_k_values[] = "values"; +static const char __pyx_k_fortran[] = "fortran"; +static const char __pyx_k_memview[] = "memview"; +static const char __pyx_k_Ellipsis[] = "Ellipsis"; +static const char __pyx_k_getstate[] = "__getstate__"; +static const char __pyx_k_itemsize[] = "itemsize"; +static const char __pyx_k_pyx_type[] = "__pyx_type"; +static const char __pyx_k_setstate[] = "__setstate__"; +static const char __pyx_k_TypeError[] = "TypeError"; +static const char __pyx_k_enumerate[] = "enumerate"; +static const char __pyx_k_pyx_state[] = "__pyx_state"; +static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; +static const char __pyx_k_IndexError[] = "IndexError"; +static const char __pyx_k_ValueError[] = "ValueError"; +static const char __pyx_k_pyx_result[] = "__pyx_result"; +static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; +static const char __pyx_k_MemoryError[] = "MemoryError"; +static const char __pyx_k_PickleError[] = "PickleError"; +static const char __pyx_k_pyx_checksum[] = "__pyx_checksum"; +static const char __pyx_k_stringsource[] = "stringsource"; +static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; +static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; +static const char 
__pyx_k_View_MemoryView[] = "View.MemoryView"; +static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; +static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; +static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; +static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; +static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum"; +static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; +static const char __pyx_k_strided_and_direct[] = "<strided and direct>"; +static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; +static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; +static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; +static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; +static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; +static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; +static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; +static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; +static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; +static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; +static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; +static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; +static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview"; +static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview"; +static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; +static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))"; +static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; +static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; +static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; +static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; +static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; +static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; +static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; +static PyObject *__pyx_n_s_ASCII; +static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; +static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; +static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor; +static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi; +static PyObject *__pyx_kp_s_Cannot_index_with_type_s; +static PyObject *__pyx_n_s_Ellipsis; +static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; +static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0; +static PyObject *__pyx_n_s_IndexError; +static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; +static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; +static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; +static PyObject *__pyx_n_s_MemoryError; +static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; +static PyObject
*__pyx_kp_s_MemoryView_of_r_object; +static PyObject *__pyx_n_b_O; +static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; +static PyObject *__pyx_n_s_PickleError; +static PyObject *__pyx_n_s_TypeError; +static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; +static PyObject *__pyx_n_s_ValueError; +static PyObject *__pyx_n_s_View_MemoryView; +static PyObject *__pyx_n_s_allocate_buffer; +static PyObject *__pyx_n_s_base; +static PyObject *__pyx_n_s_c; +static PyObject *__pyx_n_u_c; +static PyObject *__pyx_n_s_class; +static PyObject *__pyx_n_s_cline_in_traceback; +static PyObject *__pyx_kp_s_contiguous_and_direct; +static PyObject *__pyx_kp_s_contiguous_and_indirect; +static PyObject *__pyx_n_s_dict; +static PyObject *__pyx_n_s_dtype_is_object; +static PyObject *__pyx_n_s_encode; +static PyObject *__pyx_n_s_enumerate; +static PyObject *__pyx_n_s_error; +static PyObject *__pyx_n_s_flags; +static PyObject *__pyx_n_s_format; +static PyObject *__pyx_n_s_fortran; +static PyObject *__pyx_n_u_fortran; +static PyObject *__pyx_n_s_getstate; +static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; +static PyObject *__pyx_n_s_id; +static PyObject *__pyx_n_s_import; +static PyObject *__pyx_n_s_itemsize; +static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; +static PyObject *__pyx_n_s_main; +static PyObject *__pyx_n_s_memview; +static PyObject *__pyx_n_s_mode; +static PyObject *__pyx_n_s_name; +static PyObject *__pyx_n_s_name_2; +static PyObject *__pyx_n_s_ndim; +static PyObject *__pyx_n_s_new; +static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; +static PyObject *__pyx_n_s_obj; +static PyObject *__pyx_n_s_pack; +static PyObject *__pyx_n_s_paths; +static PyObject *__pyx_n_s_pickle; +static PyObject *__pyx_n_s_pyx_PickleError; +static PyObject *__pyx_n_s_pyx_checksum; +static PyObject *__pyx_n_s_pyx_getbuffer; +static PyObject *__pyx_n_s_pyx_result; +static PyObject *__pyx_n_s_pyx_state; +static PyObject *__pyx_n_s_pyx_type; +static PyObject *__pyx_n_s_pyx_unpickle_Enum; +static PyObject *__pyx_n_s_pyx_vtable; +static PyObject *__pyx_n_s_range; +static PyObject *__pyx_n_s_reduce; +static PyObject *__pyx_n_s_reduce_cython; +static PyObject *__pyx_n_s_reduce_ex; +static PyObject *__pyx_n_s_setstate; +static PyObject *__pyx_n_s_setstate_cython; +static PyObject *__pyx_n_s_shape; +static PyObject *__pyx_n_s_size; +static PyObject *__pyx_n_s_start; +static PyObject *__pyx_n_s_step; +static PyObject *__pyx_n_s_stop; +static PyObject *__pyx_kp_s_strided_and_direct; +static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; +static PyObject *__pyx_kp_s_strided_and_indirect; +static PyObject *__pyx_kp_s_stringsource; +static PyObject *__pyx_n_s_struct; +static PyObject *__pyx_n_s_t_xs; +static PyObject *__pyx_n_s_t_ys; +static PyObject *__pyx_n_s_test; +static PyObject *__pyx_kp_s_unable_to_allocate_array_data; +static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; +static PyObject *__pyx_n_s_unpack; +static PyObject *__pyx_n_s_update; +static PyObject *__pyx_n_s_values; +static PyObject *__pyx_pf_15monotonic_align_4core_maximum_path_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs); /* proto */ +static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ 
+static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ +static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ +static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ +static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ +static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ +static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ +static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ +static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ +static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ +static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ +static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ +static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject 
*__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ +static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ +static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ +static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ +static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ +static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ +static PyObject *__pyx_int_0; +static PyObject *__pyx_int_1; +static PyObject *__pyx_int_184977713; +static PyObject *__pyx_int_neg_1; +static float __pyx_k_; +static PyObject *__pyx_tuple__2; +static PyObject *__pyx_tuple__3; +static PyObject *__pyx_tuple__4; +static PyObject *__pyx_tuple__5; +static PyObject *__pyx_tuple__6; +static PyObject *__pyx_tuple__7; +static PyObject *__pyx_tuple__8; +static PyObject *__pyx_tuple__9; +static PyObject *__pyx_slice__16; +static 
PyObject *__pyx_tuple__10; +static PyObject *__pyx_tuple__11; +static PyObject *__pyx_tuple__12; +static PyObject *__pyx_tuple__13; +static PyObject *__pyx_tuple__14; +static PyObject *__pyx_tuple__15; +static PyObject *__pyx_tuple__17; +static PyObject *__pyx_tuple__18; +static PyObject *__pyx_tuple__19; +static PyObject *__pyx_tuple__20; +static PyObject *__pyx_tuple__21; +static PyObject *__pyx_tuple__22; +static PyObject *__pyx_tuple__23; +static PyObject *__pyx_tuple__24; +static PyObject *__pyx_tuple__25; +static PyObject *__pyx_codeobj__26; +/* Late includes */ + +/* "monotonic_align/core.pyx":7 + * @cython.boundscheck(False) + * @cython.wraparound(False) + * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< + * cdef int x + * cdef int y + */ + +static void __pyx_f_15monotonic_align_4core_maximum_path_each(__Pyx_memviewslice __pyx_v_path, __Pyx_memviewslice __pyx_v_value, int __pyx_v_t_y, int __pyx_v_t_x, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each *__pyx_optional_args) { + float __pyx_v_max_neg_val = __pyx_k_; + int __pyx_v_x; + int __pyx_v_y; + float __pyx_v_v_prev; + float __pyx_v_v_cur; + int __pyx_v_index; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + long __pyx_t_4; + int __pyx_t_5; + long __pyx_t_6; + long __pyx_t_7; + int __pyx_t_8; + Py_ssize_t __pyx_t_9; + Py_ssize_t __pyx_t_10; + float __pyx_t_11; + float __pyx_t_12; + float __pyx_t_13; + int __pyx_t_14; + Py_ssize_t __pyx_t_15; + Py_ssize_t __pyx_t_16; + if (__pyx_optional_args) { + if (__pyx_optional_args->__pyx_n > 0) { + __pyx_v_max_neg_val = __pyx_optional_args->max_neg_val; + } + } + + /* "monotonic_align/core.pyx":13 + * cdef float v_cur + * cdef float tmp + * cdef int index = t_x - 1 # <<<<<<<<<<<<<< + * + * for y in range(t_y): + */ + __pyx_v_index = (__pyx_v_t_x - 1); + + /* "monotonic_align/core.pyx":15 + * cdef int index = t_x - 1 + * + * for y in range(t_y): # <<<<<<<<<<<<<< + * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): + * if x == y: + */ + __pyx_t_1 = __pyx_v_t_y; + __pyx_t_2 = __pyx_t_1; + for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { + __pyx_v_y = __pyx_t_3; + + /* "monotonic_align/core.pyx":16 + * + * for y in range(t_y): + * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): # <<<<<<<<<<<<<< + * if x == y: + * v_cur = max_neg_val + */ + __pyx_t_4 = (__pyx_v_y + 1); + __pyx_t_5 = __pyx_v_t_x; + if (((__pyx_t_4 < __pyx_t_5) != 0)) { + __pyx_t_6 = __pyx_t_4; + } else { + __pyx_t_6 = __pyx_t_5; + } + __pyx_t_4 = __pyx_t_6; + __pyx_t_5 = ((__pyx_v_t_x + __pyx_v_y) - __pyx_v_t_y); + __pyx_t_6 = 0; + if (((__pyx_t_5 > __pyx_t_6) != 0)) { + __pyx_t_7 = __pyx_t_5; + } else { + __pyx_t_7 = __pyx_t_6; + } + __pyx_t_6 = __pyx_t_4; + for (__pyx_t_5 = __pyx_t_7; __pyx_t_5 < __pyx_t_6; __pyx_t_5+=1) { + __pyx_v_x = __pyx_t_5; + + /* "monotonic_align/core.pyx":17 + * for y in range(t_y): + * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): + * if x == y: # <<<<<<<<<<<<<< + * v_cur = max_neg_val + * else: + */ + __pyx_t_8 = ((__pyx_v_x == __pyx_v_y) != 0); + if (__pyx_t_8) { + + /* "monotonic_align/core.pyx":18 + * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): + * if x == y: + * v_cur = max_neg_val # <<<<<<<<<<<<<< + * else: + * v_cur = value[y-1, x] + */ + __pyx_v_v_cur = __pyx_v_max_neg_val; + + /* "monotonic_align/core.pyx":17 + * for y in range(t_y): + * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): + * if x == y: # <<<<<<<<<<<<<< + * v_cur = 
max_neg_val + * else: + */ + goto __pyx_L7; + } + + /* "monotonic_align/core.pyx":20 + * v_cur = max_neg_val + * else: + * v_cur = value[y-1, x] # <<<<<<<<<<<<<< + * if x == 0: + * if y == 0: + */ + /*else*/ { + __pyx_t_9 = (__pyx_v_y - 1); + __pyx_t_10 = __pyx_v_x; + __pyx_v_v_cur = (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) ))); + } + __pyx_L7:; + + /* "monotonic_align/core.pyx":21 + * else: + * v_cur = value[y-1, x] + * if x == 0: # <<<<<<<<<<<<<< + * if y == 0: + * v_prev = 0. + */ + __pyx_t_8 = ((__pyx_v_x == 0) != 0); + if (__pyx_t_8) { + + /* "monotonic_align/core.pyx":22 + * v_cur = value[y-1, x] + * if x == 0: + * if y == 0: # <<<<<<<<<<<<<< + * v_prev = 0. + * else: + */ + __pyx_t_8 = ((__pyx_v_y == 0) != 0); + if (__pyx_t_8) { + + /* "monotonic_align/core.pyx":23 + * if x == 0: + * if y == 0: + * v_prev = 0. # <<<<<<<<<<<<<< + * else: + * v_prev = max_neg_val + */ + __pyx_v_v_prev = 0.; + + /* "monotonic_align/core.pyx":22 + * v_cur = value[y-1, x] + * if x == 0: + * if y == 0: # <<<<<<<<<<<<<< + * v_prev = 0. + * else: + */ + goto __pyx_L9; + } + + /* "monotonic_align/core.pyx":25 + * v_prev = 0. + * else: + * v_prev = max_neg_val # <<<<<<<<<<<<<< + * else: + * v_prev = value[y-1, x-1] + */ + /*else*/ { + __pyx_v_v_prev = __pyx_v_max_neg_val; + } + __pyx_L9:; + + /* "monotonic_align/core.pyx":21 + * else: + * v_cur = value[y-1, x] + * if x == 0: # <<<<<<<<<<<<<< + * if y == 0: + * v_prev = 0. + */ + goto __pyx_L8; + } + + /* "monotonic_align/core.pyx":27 + * v_prev = max_neg_val + * else: + * v_prev = value[y-1, x-1] # <<<<<<<<<<<<<< + * value[y, x] += max(v_prev, v_cur) + * + */ + /*else*/ { + __pyx_t_10 = (__pyx_v_y - 1); + __pyx_t_9 = (__pyx_v_x - 1); + __pyx_v_v_prev = (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_10 * __pyx_v_value.strides[0]) )) + __pyx_t_9)) ))); + } + __pyx_L8:; + + /* "monotonic_align/core.pyx":28 + * else: + * v_prev = value[y-1, x-1] + * value[y, x] += max(v_prev, v_cur) # <<<<<<<<<<<<<< + * + * for y in range(t_y - 1, -1, -1): + */ + __pyx_t_11 = __pyx_v_v_cur; + __pyx_t_12 = __pyx_v_v_prev; + if (((__pyx_t_11 > __pyx_t_12) != 0)) { + __pyx_t_13 = __pyx_t_11; + } else { + __pyx_t_13 = __pyx_t_12; + } + __pyx_t_9 = __pyx_v_y; + __pyx_t_10 = __pyx_v_x; + *((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) )) += __pyx_t_13; + } + } + + /* "monotonic_align/core.pyx":30 + * value[y, x] += max(v_prev, v_cur) + * + * for y in range(t_y - 1, -1, -1): # <<<<<<<<<<<<<< + * path[y, index] = 1 + * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): + */ + for (__pyx_t_1 = (__pyx_v_t_y - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { + __pyx_v_y = __pyx_t_1; + + /* "monotonic_align/core.pyx":31 + * + * for y in range(t_y - 1, -1, -1): + * path[y, index] = 1 # <<<<<<<<<<<<<< + * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): + * index = index - 1 + */ + __pyx_t_10 = __pyx_v_y; + __pyx_t_9 = __pyx_v_index; + *((int *) ( /* dim=1 */ ((char *) (((int *) ( /* dim=0 */ (__pyx_v_path.data + __pyx_t_10 * __pyx_v_path.strides[0]) )) + __pyx_t_9)) )) = 1; + + /* "monotonic_align/core.pyx":32 + * for y in range(t_y - 1, -1, -1): + * path[y, index] = 1 + * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): # <<<<<<<<<<<<<< + * index = index - 1 + * + */ + __pyx_t_14 = 
((__pyx_v_index != 0) != 0); + if (__pyx_t_14) { + } else { + __pyx_t_8 = __pyx_t_14; + goto __pyx_L13_bool_binop_done; + } + __pyx_t_14 = ((__pyx_v_index == __pyx_v_y) != 0); + if (!__pyx_t_14) { + } else { + __pyx_t_8 = __pyx_t_14; + goto __pyx_L13_bool_binop_done; + } + __pyx_t_9 = (__pyx_v_y - 1); + __pyx_t_10 = __pyx_v_index; + __pyx_t_15 = (__pyx_v_y - 1); + __pyx_t_16 = (__pyx_v_index - 1); + __pyx_t_14 = (((*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) ))) < (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_15 * __pyx_v_value.strides[0]) )) + __pyx_t_16)) )))) != 0); + __pyx_t_8 = __pyx_t_14; + __pyx_L13_bool_binop_done:; + if (__pyx_t_8) { + + /* "monotonic_align/core.pyx":33 + * path[y, index] = 1 + * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): + * index = index - 1 # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_index = (__pyx_v_index - 1); + + /* "monotonic_align/core.pyx":32 + * for y in range(t_y - 1, -1, -1): + * path[y, index] = 1 + * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): # <<<<<<<<<<<<<< + * index = index - 1 + * + */ + } + } + + /* "monotonic_align/core.pyx":7 + * @cython.boundscheck(False) + * @cython.wraparound(False) + * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< + * cdef int x + * cdef int y + */ + + /* function exit code */ +} + +/* "monotonic_align/core.pyx":38 + * @cython.boundscheck(False) + * @cython.wraparound(False) + * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: # <<<<<<<<<<<<<< + * cdef int b = paths.shape[0] + * cdef int i + */ + +static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static void __pyx_f_15monotonic_align_4core_maximum_path_c(__Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs, CYTHON_UNUSED int __pyx_skip_dispatch) { + CYTHON_UNUSED int __pyx_v_b; + int __pyx_v_i; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + __Pyx_memviewslice __pyx_t_4 = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } }; + Py_ssize_t __pyx_t_6; + Py_ssize_t __pyx_t_7; + + /* "monotonic_align/core.pyx":39 + * @cython.wraparound(False) + * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: + * cdef int b = paths.shape[0] # <<<<<<<<<<<<<< + * cdef int i + * for i in prange(b, nogil=True): + */ + __pyx_v_b = (__pyx_v_paths.shape[0]); + + /* "monotonic_align/core.pyx":41 + * cdef int b = paths.shape[0] + * cdef int i + * for i in prange(b, nogil=True): # <<<<<<<<<<<<<< + * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) + */ + { + #ifdef WITH_THREAD + PyThreadState *_save; + Py_UNBLOCK_THREADS + __Pyx_FastGIL_Remember(); + #endif + /*try:*/ { + __pyx_t_1 = __pyx_v_b; + if ((1 == 0)) abort(); + { + #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) + #undef likely + #undef unlikely + #define likely(x) (x) + #define unlikely(x) (x) + #endif + __pyx_t_3 = (__pyx_t_1 - 0 + 1 - 1/abs(1)) / 1; + if (__pyx_t_3 > 0) + { + #ifdef _OPENMP + #pragma omp parallel private(__pyx_t_6, 
__pyx_t_7) firstprivate(__pyx_t_4, __pyx_t_5) + #endif /* _OPENMP */ + { + #ifdef _OPENMP + #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) + #endif /* _OPENMP */ + for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){ + { + __pyx_v_i = (int)(0 + 1 * __pyx_t_2); + + /* "monotonic_align/core.pyx":42 + * cdef int i + * for i in prange(b, nogil=True): + * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) # <<<<<<<<<<<<<< + */ + __pyx_t_4.data = __pyx_v_paths.data; + __pyx_t_4.memview = __pyx_v_paths.memview; + __PYX_INC_MEMVIEW(&__pyx_t_4, 0); + { + Py_ssize_t __pyx_tmp_idx = __pyx_v_i; + Py_ssize_t __pyx_tmp_stride = __pyx_v_paths.strides[0]; + __pyx_t_4.data += __pyx_tmp_idx * __pyx_tmp_stride; +} + +__pyx_t_4.shape[0] = __pyx_v_paths.shape[1]; +__pyx_t_4.strides[0] = __pyx_v_paths.strides[1]; + __pyx_t_4.suboffsets[0] = -1; + +__pyx_t_4.shape[1] = __pyx_v_paths.shape[2]; +__pyx_t_4.strides[1] = __pyx_v_paths.strides[2]; + __pyx_t_4.suboffsets[1] = -1; + +__pyx_t_5.data = __pyx_v_values.data; + __pyx_t_5.memview = __pyx_v_values.memview; + __PYX_INC_MEMVIEW(&__pyx_t_5, 0); + { + Py_ssize_t __pyx_tmp_idx = __pyx_v_i; + Py_ssize_t __pyx_tmp_stride = __pyx_v_values.strides[0]; + __pyx_t_5.data += __pyx_tmp_idx * __pyx_tmp_stride; +} + +__pyx_t_5.shape[0] = __pyx_v_values.shape[1]; +__pyx_t_5.strides[0] = __pyx_v_values.strides[1]; + __pyx_t_5.suboffsets[0] = -1; + +__pyx_t_5.shape[1] = __pyx_v_values.shape[2]; +__pyx_t_5.strides[1] = __pyx_v_values.strides[2]; + __pyx_t_5.suboffsets[1] = -1; + +__pyx_t_6 = __pyx_v_i; + __pyx_t_7 = __pyx_v_i; + __pyx_f_15monotonic_align_4core_maximum_path_each(__pyx_t_4, __pyx_t_5, (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_t_ys.data) + __pyx_t_6)) ))), (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_t_xs.data) + __pyx_t_7)) ))), NULL); + __PYX_XDEC_MEMVIEW(&__pyx_t_4, 0); + __pyx_t_4.memview = NULL; + __pyx_t_4.data = NULL; + __PYX_XDEC_MEMVIEW(&__pyx_t_5, 0); + __pyx_t_5.memview = NULL; + __pyx_t_5.data = NULL; + } + } + } + } + } + #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) + #undef likely + #undef unlikely + #define likely(x) __builtin_expect(!!(x), 1) + #define unlikely(x) __builtin_expect(!!(x), 0) + #endif + } + + /* "monotonic_align/core.pyx":41 + * cdef int b = paths.shape[0] + * cdef int i + * for i in prange(b, nogil=True): # <<<<<<<<<<<<<< + * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) + */ + /*finally:*/ { + /*normal exit:*/{ + #ifdef WITH_THREAD + __Pyx_FastGIL_Forget(); + Py_BLOCK_THREADS + #endif + goto __pyx_L5; + } + __pyx_L5:; + } + } + + /* "monotonic_align/core.pyx":38 + * @cython.boundscheck(False) + * @cython.wraparound(False) + * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: # <<<<<<<<<<<<<< + * cdef int b = paths.shape[0] + * cdef int i + */ + + /* function exit code */ +} + +/* Python wrapper */ +static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + __Pyx_memviewslice __pyx_v_paths = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_memviewslice __pyx_v_values = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_memviewslice __pyx_v_t_ys = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_memviewslice __pyx_v_t_xs = { 0, 0, { 0 }, { 0 }, { 0 } }; + 
int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("maximum_path_c (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_paths,&__pyx_n_s_values,&__pyx_n_s_t_ys,&__pyx_n_s_t_xs,0}; + PyObject* values[4] = {0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_paths)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_values)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 1); __PYX_ERR(0, 38, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_t_ys)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 2); __PYX_ERR(0, 38, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_t_xs)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 3); __PYX_ERR(0, 38, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "maximum_path_c") < 0)) __PYX_ERR(0, 38, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + } + __pyx_v_paths = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_paths.memview)) __PYX_ERR(0, 38, __pyx_L3_error) + __pyx_v_values = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_values.memview)) __PYX_ERR(0, 38, __pyx_L3_error) + __pyx_v_t_ys = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_t_ys.memview)) __PYX_ERR(0, 38, __pyx_L3_error) + __pyx_v_t_xs = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_t_xs.memview)) __PYX_ERR(0, 38, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 38, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("monotonic_align.core.maximum_path_c", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_15monotonic_align_4core_maximum_path_c(__pyx_self, __pyx_v_paths, __pyx_v_values, __pyx_v_t_ys, __pyx_v_t_xs); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject 
*__pyx_pf_15monotonic_align_4core_maximum_path_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("maximum_path_c", 0); + __Pyx_XDECREF(__pyx_r); + if (unlikely(!__pyx_v_paths.memview)) { __Pyx_RaiseUnboundLocalError("paths"); __PYX_ERR(0, 38, __pyx_L1_error) } + if (unlikely(!__pyx_v_values.memview)) { __Pyx_RaiseUnboundLocalError("values"); __PYX_ERR(0, 38, __pyx_L1_error) } + if (unlikely(!__pyx_v_t_ys.memview)) { __Pyx_RaiseUnboundLocalError("t_ys"); __PYX_ERR(0, 38, __pyx_L1_error) } + if (unlikely(!__pyx_v_t_xs.memview)) { __Pyx_RaiseUnboundLocalError("t_xs"); __PYX_ERR(0, 38, __pyx_L1_error) } + __pyx_t_1 = __Pyx_void_to_None(__pyx_f_15monotonic_align_4core_maximum_path_c(__pyx_v_paths, __pyx_v_values, __pyx_v_t_ys, __pyx_v_t_xs, 0)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 38, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("monotonic_align.core.maximum_path_c", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __PYX_XDEC_MEMVIEW(&__pyx_v_paths, 1); + __PYX_XDEC_MEMVIEW(&__pyx_v_values, 1); + __PYX_XDEC_MEMVIEW(&__pyx_v_t_ys, 1); + __PYX_XDEC_MEMVIEW(&__pyx_v_t_xs, 1); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":122 + * cdef bint dtype_is_object + * + * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< + * mode="c", bint allocate_buffer=True): + * + */ + +/* Python wrapper */ +static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_shape = 0; + Py_ssize_t __pyx_v_itemsize; + PyObject *__pyx_v_format = 0; + PyObject *__pyx_v_mode = 0; + int __pyx_v_allocate_buffer; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; + PyObject* values[5] = {0,0,0,0,0}; + values[3] = ((PyObject *)__pyx_n_s_c); + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = 
__Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 122, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 122, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode); + if (value) { values[3] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 4: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer); + if (value) { values[4] = value; kw_args--; } + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 122, __pyx_L3_error) + } + } else { + switch (PyTuple_GET_SIZE(__pyx_args)) { + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + break; + default: goto __pyx_L5_argtuple_error; + } + } + __pyx_v_shape = ((PyObject*)values[0]); + __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error) + __pyx_v_format = values[2]; + __pyx_v_mode = values[3]; + if (values[4]) { + __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 123, __pyx_L3_error) + } else { + + /* "View.MemoryView":123 + * + * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, + * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< + * + * cdef int idx + */ + __pyx_v_allocate_buffer = ((int)1); + } + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 122, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return -1; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 122, __pyx_L1_error) + if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { + PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 122, __pyx_L1_error) + } + __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); + + /* "View.MemoryView":122 + * cdef bint dtype_is_object + * + * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< + * mode="c", bint allocate_buffer=True): + * + */ + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject 
*__pyx_v_mode, int __pyx_v_allocate_buffer) { + int __pyx_v_idx; + Py_ssize_t __pyx_v_i; + Py_ssize_t __pyx_v_dim; + PyObject **__pyx_v_p; + char __pyx_v_order; + int __pyx_r; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + char *__pyx_t_7; + int __pyx_t_8; + Py_ssize_t __pyx_t_9; + PyObject *__pyx_t_10 = NULL; + Py_ssize_t __pyx_t_11; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__cinit__", 0); + __Pyx_INCREF(__pyx_v_format); + + /* "View.MemoryView":129 + * cdef PyObject **p + * + * self.ndim = len(shape) # <<<<<<<<<<<<<< + * self.itemsize = itemsize + * + */ + if (unlikely(__pyx_v_shape == Py_None)) { + PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); + __PYX_ERR(1, 129, __pyx_L1_error) + } + __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 129, __pyx_L1_error) + __pyx_v_self->ndim = ((int)__pyx_t_1); + + /* "View.MemoryView":130 + * + * self.ndim = len(shape) + * self.itemsize = itemsize # <<<<<<<<<<<<<< + * + * if not self.ndim: + */ + __pyx_v_self->itemsize = __pyx_v_itemsize; + + /* "View.MemoryView":132 + * self.itemsize = itemsize + * + * if not self.ndim: # <<<<<<<<<<<<<< + * raise ValueError("Empty shape tuple for cython.array") + * + */ + __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); + if (unlikely(__pyx_t_2)) { + + /* "View.MemoryView":133 + * + * if not self.ndim: + * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< + * + * if itemsize <= 0: + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 133, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 133, __pyx_L1_error) + + /* "View.MemoryView":132 + * self.itemsize = itemsize + * + * if not self.ndim: # <<<<<<<<<<<<<< + * raise ValueError("Empty shape tuple for cython.array") + * + */ + } + + /* "View.MemoryView":135 + * raise ValueError("Empty shape tuple for cython.array") + * + * if itemsize <= 0: # <<<<<<<<<<<<<< + * raise ValueError("itemsize <= 0 for cython.array") + * + */ + __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); + if (unlikely(__pyx_t_2)) { + + /* "View.MemoryView":136 + * + * if itemsize <= 0: + * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< + * + * if not isinstance(format, bytes): + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 136, __pyx_L1_error) + + /* "View.MemoryView":135 + * raise ValueError("Empty shape tuple for cython.array") + * + * if itemsize <= 0: # <<<<<<<<<<<<<< + * raise ValueError("itemsize <= 0 for cython.array") + * + */ + } + + /* "View.MemoryView":138 + * raise ValueError("itemsize <= 0 for cython.array") + * + * if not isinstance(format, bytes): # <<<<<<<<<<<<<< + * format = format.encode('ASCII') + * self._format = format # keep a reference to the byte string + */ + __pyx_t_2 = PyBytes_Check(__pyx_v_format); + __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); + if (__pyx_t_4) { + + /* "View.MemoryView":139 + * + * if not isinstance(format, bytes): + * format = format.encode('ASCII') # <<<<<<<<<<<<<< + 
* self._format = format # keep a reference to the byte string + * self.format = self._format + */ + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 139, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { + __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); + if (likely(__pyx_t_6)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); + __Pyx_INCREF(__pyx_t_6); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_5, function); + } + } + __pyx_t_3 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII); + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 139, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3); + __pyx_t_3 = 0; + + /* "View.MemoryView":138 + * raise ValueError("itemsize <= 0 for cython.array") + * + * if not isinstance(format, bytes): # <<<<<<<<<<<<<< + * format = format.encode('ASCII') + * self._format = format # keep a reference to the byte string + */ + } + + /* "View.MemoryView":140 + * if not isinstance(format, bytes): + * format = format.encode('ASCII') + * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< + * self.format = self._format + * + */ + if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 140, __pyx_L1_error) + __pyx_t_3 = __pyx_v_format; + __Pyx_INCREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_3); + __Pyx_GOTREF(__pyx_v_self->_format); + __Pyx_DECREF(__pyx_v_self->_format); + __pyx_v_self->_format = ((PyObject*)__pyx_t_3); + __pyx_t_3 = 0; + + /* "View.MemoryView":141 + * format = format.encode('ASCII') + * self._format = format # keep a reference to the byte string + * self.format = self._format # <<<<<<<<<<<<<< + * + * + */ + if (unlikely(__pyx_v_self->_format == Py_None)) { + PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found"); + __PYX_ERR(1, 141, __pyx_L1_error) + } + __pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 141, __pyx_L1_error) + __pyx_v_self->format = __pyx_t_7; + + /* "View.MemoryView":144 + * + * + * self._shape = PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< + * self._strides = self._shape + self.ndim + * + */ + __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); + + /* "View.MemoryView":145 + * + * self._shape = PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) + * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< + * + * if not self._shape: + */ + __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); + + /* "View.MemoryView":147 + * self._strides = self._shape + self.ndim + * + * if not self._shape: # <<<<<<<<<<<<<< + * raise MemoryError("unable to allocate shape and strides.") + * + */ + __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); + if (unlikely(__pyx_t_4)) { + + /* "View.MemoryView":148 + * + * if not self._shape: + * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 148, __pyx_L1_error) + 
__Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 148, __pyx_L1_error) + + /* "View.MemoryView":147 + * self._strides = self._shape + self.ndim + * + * if not self._shape: # <<<<<<<<<<<<<< + * raise MemoryError("unable to allocate shape and strides.") + * + */ + } + + /* "View.MemoryView":151 + * + * + * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< + * if dim <= 0: + * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) + */ + __pyx_t_8 = 0; + __pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0; + for (;;) { + if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 151, __pyx_L1_error) + #else + __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 151, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + #endif + __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_v_dim = __pyx_t_9; + __pyx_v_idx = __pyx_t_8; + __pyx_t_8 = (__pyx_t_8 + 1); + + /* "View.MemoryView":152 + * + * for idx, dim in enumerate(shape): + * if dim <= 0: # <<<<<<<<<<<<<< + * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) + * self._shape[idx] = dim + */ + __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); + if (unlikely(__pyx_t_4)) { + + /* "View.MemoryView":153 + * for idx, dim in enumerate(shape): + * if dim <= 0: + * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<< + * self._shape[idx] = dim + * + */ + __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 153, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_GIVEREF(__pyx_t_5); + PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6); + __pyx_t_5 = 0; + __pyx_t_6 = 0; + __pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_Raise(__pyx_t_10, 0, 0, 0); + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __PYX_ERR(1, 153, __pyx_L1_error) + + /* "View.MemoryView":152 + * + * for idx, dim in enumerate(shape): + * if dim <= 0: # <<<<<<<<<<<<<< + * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) + * self._shape[idx] = dim + */ + } + + /* "View.MemoryView":154 + * if dim <= 0: + * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) + * self._shape[idx] = dim # <<<<<<<<<<<<<< + * + * cdef char order + */ + (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; + + /* "View.MemoryView":151 + * + * + * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< + * if dim <= 0: + * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) + */ + } + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "View.MemoryView":157 + * + * cdef char order + * if mode == 'fortran': # <<<<<<<<<<<<<< + * order = b'F' + * self.mode = u'fortran' + */ + __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 157, __pyx_L1_error) + if (__pyx_t_4) { + + /* "View.MemoryView":158 + * cdef char order + * if mode == 'fortran': + * order = b'F' # <<<<<<<<<<<<<< + * self.mode = u'fortran' + * elif mode == 'c': + */ + __pyx_v_order = 'F'; + + /* "View.MemoryView":159 + * if mode == 'fortran': + * order = b'F' + * self.mode = u'fortran' # <<<<<<<<<<<<<< + * elif mode == 'c': + * order = b'C' + */ + __Pyx_INCREF(__pyx_n_u_fortran); + __Pyx_GIVEREF(__pyx_n_u_fortran); + __Pyx_GOTREF(__pyx_v_self->mode); + __Pyx_DECREF(__pyx_v_self->mode); + __pyx_v_self->mode = __pyx_n_u_fortran; + + /* "View.MemoryView":157 + * + * cdef char order + * if mode == 'fortran': # <<<<<<<<<<<<<< + * order = b'F' + * self.mode = u'fortran' + */ + goto __pyx_L10; + } + + /* "View.MemoryView":160 + * order = b'F' + * self.mode = u'fortran' + * elif mode == 'c': # <<<<<<<<<<<<<< + * order = b'C' + * self.mode = u'c' + */ + __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 160, __pyx_L1_error) + if (likely(__pyx_t_4)) { + + /* "View.MemoryView":161 + * self.mode = u'fortran' + * elif mode == 'c': + * order = b'C' # <<<<<<<<<<<<<< + * self.mode = u'c' + * else: + */ + __pyx_v_order = 'C'; + + /* "View.MemoryView":162 + * elif mode == 'c': + * order = b'C' + * self.mode = u'c' # <<<<<<<<<<<<<< + * else: + * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) + */ + __Pyx_INCREF(__pyx_n_u_c); + __Pyx_GIVEREF(__pyx_n_u_c); + __Pyx_GOTREF(__pyx_v_self->mode); + __Pyx_DECREF(__pyx_v_self->mode); + __pyx_v_self->mode = __pyx_n_u_c; + + /* "View.MemoryView":160 + * order = b'F' + * self.mode = u'fortran' + * elif mode == 'c': # <<<<<<<<<<<<<< + * order = b'C' + * self.mode = u'c' + */ + goto __pyx_L10; + } + + /* "View.MemoryView":164 + * self.mode = u'c' + * else: + * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< + * + * self.len = fill_contig_strides_array(self._shape, self._strides, + */ + /*else*/ { + __pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 164, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 164, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_Raise(__pyx_t_10, 0, 0, 0); + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __PYX_ERR(1, 164, __pyx_L1_error) + } + __pyx_L10:; + + /* "View.MemoryView":166 + * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) + * + * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< + * itemsize, self.ndim, order) + * + */ + __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); + + /* "View.MemoryView":169 + * itemsize, self.ndim, order) + * + * self.free_data = allocate_buffer # <<<<<<<<<<<<<< + * self.dtype_is_object = format == b'O' + * if allocate_buffer: + */ + __pyx_v_self->free_data = __pyx_v_allocate_buffer; + + /* "View.MemoryView":170 + * + * 
self.free_data = allocate_buffer + * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< + * if allocate_buffer: + * + */ + __pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 170, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 170, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __pyx_v_self->dtype_is_object = __pyx_t_4; + + /* "View.MemoryView":171 + * self.free_data = allocate_buffer + * self.dtype_is_object = format == b'O' + * if allocate_buffer: # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_4 = (__pyx_v_allocate_buffer != 0); + if (__pyx_t_4) { + + /* "View.MemoryView":174 + * + * + * self.data = malloc(self.len) # <<<<<<<<<<<<<< + * if not self.data: + * raise MemoryError("unable to allocate array data.") + */ + __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); + + /* "View.MemoryView":175 + * + * self.data = malloc(self.len) + * if not self.data: # <<<<<<<<<<<<<< + * raise MemoryError("unable to allocate array data.") + * + */ + __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); + if (unlikely(__pyx_t_4)) { + + /* "View.MemoryView":176 + * self.data = malloc(self.len) + * if not self.data: + * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< + * + * if self.dtype_is_object: + */ + __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 176, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_Raise(__pyx_t_10, 0, 0, 0); + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __PYX_ERR(1, 176, __pyx_L1_error) + + /* "View.MemoryView":175 + * + * self.data = malloc(self.len) + * if not self.data: # <<<<<<<<<<<<<< + * raise MemoryError("unable to allocate array data.") + * + */ + } + + /* "View.MemoryView":178 + * raise MemoryError("unable to allocate array data.") + * + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * p = self.data + * for i in range(self.len / itemsize): + */ + __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); + if (__pyx_t_4) { + + /* "View.MemoryView":179 + * + * if self.dtype_is_object: + * p = self.data # <<<<<<<<<<<<<< + * for i in range(self.len / itemsize): + * p[i] = Py_None + */ + __pyx_v_p = ((PyObject **)__pyx_v_self->data); + + /* "View.MemoryView":180 + * if self.dtype_is_object: + * p = self.data + * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< + * p[i] = Py_None + * Py_INCREF(Py_None) + */ + if (unlikely(__pyx_v_itemsize == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); + __PYX_ERR(1, 180, __pyx_L1_error) + } + else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { + PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); + __PYX_ERR(1, 180, __pyx_L1_error) + } + __pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize); + __pyx_t_9 = __pyx_t_1; + for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) { + __pyx_v_i = __pyx_t_11; + + /* "View.MemoryView":181 + * p = self.data + * for i in range(self.len / itemsize): + * p[i] = Py_None # <<<<<<<<<<<<<< + * Py_INCREF(Py_None) + * + */ + (__pyx_v_p[__pyx_v_i]) = Py_None; + + /* "View.MemoryView":182 + * for i in range(self.len / itemsize): + * p[i] = Py_None + * Py_INCREF(Py_None) # <<<<<<<<<<<<<< + * + * @cname('getbuffer') + */ + 
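+/* Each object slot above stores a borrowed pointer to Py_None, so the
+   matching incref below keeps None's refcount consistent with the
+   self.len / itemsize slots just initialised. */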
Py_INCREF(Py_None); + } + + /* "View.MemoryView":178 + * raise MemoryError("unable to allocate array data.") + * + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * p = self.data + * for i in range(self.len / itemsize): + */ + } + + /* "View.MemoryView":171 + * self.free_data = allocate_buffer + * self.dtype_is_object = format == b'O' + * if allocate_buffer: # <<<<<<<<<<<<<< + * + * + */ + } + + /* "View.MemoryView":122 + * cdef bint dtype_is_object + * + * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< + * mode="c", bint allocate_buffer=True): + * + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_10); + __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_format); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":185 + * + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< + * cdef int bufmode = -1 + * if self.mode == u"c": + */ + +/* Python wrapper */ +static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ +static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); + __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_v_bufmode; + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + char *__pyx_t_4; + Py_ssize_t __pyx_t_5; + int __pyx_t_6; + Py_ssize_t *__pyx_t_7; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + if (__pyx_v_info == NULL) { + PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); + return -1; + } + __Pyx_RefNannySetupContext("__getbuffer__", 0); + __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(__pyx_v_info->obj); + + /* "View.MemoryView":186 + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): + * cdef int bufmode = -1 # <<<<<<<<<<<<<< + * if self.mode == u"c": + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + */ + __pyx_v_bufmode = -1; + + /* "View.MemoryView":187 + * def __getbuffer__(self, Py_buffer *info, int flags): + * cdef int bufmode = -1 + * if self.mode == u"c": # <<<<<<<<<<<<<< + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * elif self.mode == u"fortran": + */ + __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 187, __pyx_L1_error) + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":188 + * cdef int bufmode = -1 + * if self.mode == u"c": + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< + * elif self.mode == u"fortran": + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + */ + __pyx_v_bufmode = 
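+/* Buffer requests are validated against the array's layout: a "c"-mode
+   array only honours requests carrying PyBUF_C_CONTIGUOUS or
+   PyBUF_ANY_CONTIGUOUS, a "fortran"-mode array the F-contiguous
+   equivalents; any other request raises the ValueError further below. */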
(PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); + + /* "View.MemoryView":187 + * def __getbuffer__(self, Py_buffer *info, int flags): + * cdef int bufmode = -1 + * if self.mode == u"c": # <<<<<<<<<<<<<< + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * elif self.mode == u"fortran": + */ + goto __pyx_L3; + } + + /* "View.MemoryView":189 + * if self.mode == u"c": + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * elif self.mode == u"fortran": # <<<<<<<<<<<<<< + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * if not (flags & bufmode): + */ + __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 189, __pyx_L1_error) + __pyx_t_1 = (__pyx_t_2 != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":190 + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * elif self.mode == u"fortran": + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< + * if not (flags & bufmode): + * raise ValueError("Can only create a buffer that is contiguous in memory.") + */ + __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); + + /* "View.MemoryView":189 + * if self.mode == u"c": + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * elif self.mode == u"fortran": # <<<<<<<<<<<<<< + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * if not (flags & bufmode): + */ + } + __pyx_L3:; + + /* "View.MemoryView":191 + * elif self.mode == u"fortran": + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * if not (flags & bufmode): # <<<<<<<<<<<<<< + * raise ValueError("Can only create a buffer that is contiguous in memory.") + * info.buf = self.data + */ + __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":192 + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * if not (flags & bufmode): + * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< + * info.buf = self.data + * info.len = self.len + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 192, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 192, __pyx_L1_error) + + /* "View.MemoryView":191 + * elif self.mode == u"fortran": + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * if not (flags & bufmode): # <<<<<<<<<<<<<< + * raise ValueError("Can only create a buffer that is contiguous in memory.") + * info.buf = self.data + */ + } + + /* "View.MemoryView":193 + * if not (flags & bufmode): + * raise ValueError("Can only create a buffer that is contiguous in memory.") + * info.buf = self.data # <<<<<<<<<<<<<< + * info.len = self.len + * info.ndim = self.ndim + */ + __pyx_t_4 = __pyx_v_self->data; + __pyx_v_info->buf = __pyx_t_4; + + /* "View.MemoryView":194 + * raise ValueError("Can only create a buffer that is contiguous in memory.") + * info.buf = self.data + * info.len = self.len # <<<<<<<<<<<<<< + * info.ndim = self.ndim + * info.shape = self._shape + */ + __pyx_t_5 = __pyx_v_self->len; + __pyx_v_info->len = __pyx_t_5; + + /* "View.MemoryView":195 + * info.buf = self.data + * info.len = self.len + * info.ndim = self.ndim # <<<<<<<<<<<<<< + * info.shape = self._shape + * info.strides = self._strides + */ + __pyx_t_6 = __pyx_v_self->ndim; + __pyx_v_info->ndim = __pyx_t_6; + + /* "View.MemoryView":196 + * info.len = self.len + * info.ndim = self.ndim + * 
info.shape = self._shape # <<<<<<<<<<<<<< + * info.strides = self._strides + * info.suboffsets = NULL + */ + __pyx_t_7 = __pyx_v_self->_shape; + __pyx_v_info->shape = __pyx_t_7; + + /* "View.MemoryView":197 + * info.ndim = self.ndim + * info.shape = self._shape + * info.strides = self._strides # <<<<<<<<<<<<<< + * info.suboffsets = NULL + * info.itemsize = self.itemsize + */ + __pyx_t_7 = __pyx_v_self->_strides; + __pyx_v_info->strides = __pyx_t_7; + + /* "View.MemoryView":198 + * info.shape = self._shape + * info.strides = self._strides + * info.suboffsets = NULL # <<<<<<<<<<<<<< + * info.itemsize = self.itemsize + * info.readonly = 0 + */ + __pyx_v_info->suboffsets = NULL; + + /* "View.MemoryView":199 + * info.strides = self._strides + * info.suboffsets = NULL + * info.itemsize = self.itemsize # <<<<<<<<<<<<<< + * info.readonly = 0 + * + */ + __pyx_t_5 = __pyx_v_self->itemsize; + __pyx_v_info->itemsize = __pyx_t_5; + + /* "View.MemoryView":200 + * info.suboffsets = NULL + * info.itemsize = self.itemsize + * info.readonly = 0 # <<<<<<<<<<<<<< + * + * if flags & PyBUF_FORMAT: + */ + __pyx_v_info->readonly = 0; + + /* "View.MemoryView":202 + * info.readonly = 0 + * + * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< + * info.format = self.format + * else: + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":203 + * + * if flags & PyBUF_FORMAT: + * info.format = self.format # <<<<<<<<<<<<<< + * else: + * info.format = NULL + */ + __pyx_t_4 = __pyx_v_self->format; + __pyx_v_info->format = __pyx_t_4; + + /* "View.MemoryView":202 + * info.readonly = 0 + * + * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< + * info.format = self.format + * else: + */ + goto __pyx_L5; + } + + /* "View.MemoryView":205 + * info.format = self.format + * else: + * info.format = NULL # <<<<<<<<<<<<<< + * + * info.obj = self + */ + /*else*/ { + __pyx_v_info->format = NULL; + } + __pyx_L5:; + + /* "View.MemoryView":207 + * info.format = NULL + * + * info.obj = self # <<<<<<<<<<<<<< + * + * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") + */ + __Pyx_INCREF(((PyObject *)__pyx_v_self)); + __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); + __pyx_v_info->obj = ((PyObject *)__pyx_v_self); + + /* "View.MemoryView":185 + * + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< + * cdef int bufmode = -1 + * if self.mode == u"c": + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + if (__pyx_v_info->obj != NULL) { + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; + } + goto __pyx_L2; + __pyx_L0:; + if (__pyx_v_info->obj == Py_None) { + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; + } + __pyx_L2:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":211 + * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") + * + * def __dealloc__(array self): # <<<<<<<<<<<<<< + * if self.callback_free_data != NULL: + * self.callback_free_data(self.data) + */ + +/* Python wrapper */ +static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ +static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { + __Pyx_RefNannyDeclarations + 
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); + __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("__dealloc__", 0); + + /* "View.MemoryView":212 + * + * def __dealloc__(array self): + * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< + * self.callback_free_data(self.data) + * elif self.free_data: + */ + __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":213 + * def __dealloc__(array self): + * if self.callback_free_data != NULL: + * self.callback_free_data(self.data) # <<<<<<<<<<<<<< + * elif self.free_data: + * if self.dtype_is_object: + */ + __pyx_v_self->callback_free_data(__pyx_v_self->data); + + /* "View.MemoryView":212 + * + * def __dealloc__(array self): + * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< + * self.callback_free_data(self.data) + * elif self.free_data: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":214 + * if self.callback_free_data != NULL: + * self.callback_free_data(self.data) + * elif self.free_data: # <<<<<<<<<<<<<< + * if self.dtype_is_object: + * refcount_objects_in_slice(self.data, self._shape, + */ + __pyx_t_1 = (__pyx_v_self->free_data != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":215 + * self.callback_free_data(self.data) + * elif self.free_data: + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * refcount_objects_in_slice(self.data, self._shape, + * self._strides, self.ndim, False) + */ + __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":216 + * elif self.free_data: + * if self.dtype_is_object: + * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< + * self._strides, self.ndim, False) + * free(self.data) + */ + __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); + + /* "View.MemoryView":215 + * self.callback_free_data(self.data) + * elif self.free_data: + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * refcount_objects_in_slice(self.data, self._shape, + * self._strides, self.ndim, False) + */ + } + + /* "View.MemoryView":218 + * refcount_objects_in_slice(self.data, self._shape, + * self._strides, self.ndim, False) + * free(self.data) # <<<<<<<<<<<<<< + * PyObject_Free(self._shape) + * + */ + free(__pyx_v_self->data); + + /* "View.MemoryView":214 + * if self.callback_free_data != NULL: + * self.callback_free_data(self.data) + * elif self.free_data: # <<<<<<<<<<<<<< + * if self.dtype_is_object: + * refcount_objects_in_slice(self.data, self._shape, + */ + } + __pyx_L3:; + + /* "View.MemoryView":219 + * self._strides, self.ndim, False) + * free(self.data) + * PyObject_Free(self._shape) # <<<<<<<<<<<<<< + * + * @property + */ + PyObject_Free(__pyx_v_self->_shape); + + /* "View.MemoryView":211 + * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") + * + * def __dealloc__(array self): # <<<<<<<<<<<<<< + * if self.callback_free_data != NULL: + * self.callback_free_data(self.data) + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "View.MemoryView":222 + * + * @property + * def memview(self): # <<<<<<<<<<<<<< + * return self.get_memview() + * + */ + +/* Python wrapper */ +static 
PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":223 + * @property + * def memview(self): + * return self.get_memview() # <<<<<<<<<<<<<< + * + * @cname('get_memview') + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 223, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "View.MemoryView":222 + * + * @property + * def memview(self): # <<<<<<<<<<<<<< + * return self.get_memview() + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":226 + * + * @cname('get_memview') + * cdef get_memview(self): # <<<<<<<<<<<<<< + * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE + * return memoryview(self, flags, self.dtype_is_object) + */ + +static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { + int __pyx_v_flags; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("get_memview", 0); + + /* "View.MemoryView":227 + * @cname('get_memview') + * cdef get_memview(self): + * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< + * return memoryview(self, flags, self.dtype_is_object) + * + */ + __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); + + /* "View.MemoryView":228 + * cdef get_memview(self): + * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE + * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< + * + * def __len__(self): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 228, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 228, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(((PyObject *)__pyx_v_self)); + __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); + PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_2); + PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); + 
__pyx_t_1 = 0; + __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":226 + * + * @cname('get_memview') + * cdef get_memview(self): # <<<<<<<<<<<<<< + * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE + * return memoryview(self, flags, self.dtype_is_object) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":230 + * return memoryview(self, flags, self.dtype_is_object) + * + * def __len__(self): # <<<<<<<<<<<<<< + * return self._shape[0] + * + */ + +/* Python wrapper */ +static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/ +static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) { + Py_ssize_t __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); + __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) { + Py_ssize_t __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__len__", 0); + + /* "View.MemoryView":231 + * + * def __len__(self): + * return self._shape[0] # <<<<<<<<<<<<<< + * + * def __getattr__(self, attr): + */ + __pyx_r = (__pyx_v_self->_shape[0]); + goto __pyx_L0; + + /* "View.MemoryView":230 + * return memoryview(self, flags, self.dtype_is_object) + * + * def __len__(self): # <<<<<<<<<<<<<< + * return self._shape[0] + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":233 + * return self._shape[0] + * + * def __getattr__(self, attr): # <<<<<<<<<<<<<< + * return getattr(self.memview, attr) + * + */ + +/* Python wrapper */ +static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ +static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); + __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__getattr__", 0); + + /* "View.MemoryView":234 + * + * def __getattr__(self, attr): + * return getattr(self.memview, attr) # <<<<<<<<<<<<<< + * + * def __getitem__(self, item): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), 
__pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 234, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 234, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":233 + * return self._shape[0] + * + * def __getattr__(self, attr): # <<<<<<<<<<<<<< + * return getattr(self.memview, attr) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":236 + * return getattr(self.memview, attr) + * + * def __getitem__(self, item): # <<<<<<<<<<<<<< + * return self.memview[item] + * + */ + +/* Python wrapper */ +static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ +static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); + __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__getitem__", 0); + + /* "View.MemoryView":237 + * + * def __getitem__(self, item): + * return self.memview[item] # <<<<<<<<<<<<<< + * + * def __setitem__(self, item, value): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 237, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 237, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":236 + * return getattr(self.memview, attr) + * + * def __getitem__(self, item): # <<<<<<<<<<<<<< + * return self.memview[item] + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":239 + * return self.memview[item] + * + * def __setitem__(self, item, value): # <<<<<<<<<<<<<< + * self.memview[item] = value + * + */ + +/* Python wrapper */ +static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ +static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); + 
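+  /* Standard Cython wrapper pattern: the PyObject-level slot function only
+   * casts `self` to the concrete struct type and forwards to the typed
+   * implementation function defined below. */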
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setitem__", 0); + + /* "View.MemoryView":240 + * + * def __setitem__(self, item, value): + * self.memview[item] = value # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 240, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 240, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "View.MemoryView":239 + * return self.memview[item] + * + * def __setitem__(self, item, value): # <<<<<<<<<<<<<< + * self.memview[item] = value + * + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__reduce_cython__", 0); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 2, __pyx_L1_error) + + /* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", 
__pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ +static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setstate_cython__", 0); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 4, __pyx_L1_error) + + /* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":244 + * + * @cname("__pyx_array_new") + * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< + * char *mode, char *buf): + * cdef array result + */ + +static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) { + struct __pyx_array_obj *__pyx_v_result = 0; + struct __pyx_array_obj *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("array_cwrapper", 0); + + /* "View.MemoryView":248 + * cdef array result + * + * if buf == NULL: # <<<<<<<<<<<<<< + * result = array(shape, itemsize, format, mode.decode('ASCII')) + * else: + */ + __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":249 + * + * if buf == 
NULL: + * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< + * else: + * result = array(shape, itemsize, format, mode.decode('ASCII'), + */ + __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_INCREF(__pyx_v_shape); + __Pyx_GIVEREF(__pyx_v_shape); + PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); + __Pyx_GIVEREF(__pyx_t_2); + PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); + __pyx_t_2 = 0; + __pyx_t_3 = 0; + __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); + __pyx_t_4 = 0; + + /* "View.MemoryView":248 + * cdef array result + * + * if buf == NULL: # <<<<<<<<<<<<<< + * result = array(shape, itemsize, format, mode.decode('ASCII')) + * else: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":251 + * result = array(shape, itemsize, format, mode.decode('ASCII')) + * else: + * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< + * allocate_buffer=False) + * result.data = buf + */ + /*else*/ { + __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 251, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 251, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_INCREF(__pyx_v_shape); + __Pyx_GIVEREF(__pyx_v_shape); + PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); + __Pyx_GIVEREF(__pyx_t_5); + PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); + __pyx_t_4 = 0; + __pyx_t_5 = 0; + __pyx_t_3 = 0; + + /* "View.MemoryView":252 + * else: + * result = array(shape, itemsize, format, mode.decode('ASCII'), + * allocate_buffer=False) # <<<<<<<<<<<<<< + * result.data = buf + * + */ + __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 252, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 252, __pyx_L1_error) + + /* "View.MemoryView":251 + * result = array(shape, itemsize, format, mode.decode('ASCII')) + * else: + * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< + * allocate_buffer=False) + * result.data = buf + */ + __pyx_t_5 = 
__Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); + __pyx_t_5 = 0; + + /* "View.MemoryView":253 + * result = array(shape, itemsize, format, mode.decode('ASCII'), + * allocate_buffer=False) + * result.data = buf # <<<<<<<<<<<<<< + * + * return result + */ + __pyx_v_result->data = __pyx_v_buf; + } + __pyx_L3:; + + /* "View.MemoryView":255 + * result.data = buf + * + * return result # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(((PyObject *)__pyx_r)); + __Pyx_INCREF(((PyObject *)__pyx_v_result)); + __pyx_r = __pyx_v_result; + goto __pyx_L0; + + /* "View.MemoryView":244 + * + * @cname("__pyx_array_new") + * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< + * char *mode, char *buf): + * cdef array result + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_result); + __Pyx_XGIVEREF((PyObject *)__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":281 + * cdef class Enum(object): + * cdef object name + * def __init__(self, name): # <<<<<<<<<<<<<< + * self.name = name + * def __repr__(self): + */ + +/* Python wrapper */ +static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_name = 0; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; + PyObject* values[1] = {0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 281, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + } + __pyx_v_name = values[0]; + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 281, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return -1; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); + + /* function 
exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__init__", 0); + + /* "View.MemoryView":282 + * cdef object name + * def __init__(self, name): + * self.name = name # <<<<<<<<<<<<<< + * def __repr__(self): + * return self.name + */ + __Pyx_INCREF(__pyx_v_name); + __Pyx_GIVEREF(__pyx_v_name); + __Pyx_GOTREF(__pyx_v_self->name); + __Pyx_DECREF(__pyx_v_self->name); + __pyx_v_self->name = __pyx_v_name; + + /* "View.MemoryView":281 + * cdef class Enum(object): + * cdef object name + * def __init__(self, name): # <<<<<<<<<<<<<< + * self.name = name + * def __repr__(self): + */ + + /* function exit code */ + __pyx_r = 0; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":283 + * def __init__(self, name): + * self.name = name + * def __repr__(self): # <<<<<<<<<<<<<< + * return self.name + * + */ + +/* Python wrapper */ +static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); + __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__repr__", 0); + + /* "View.MemoryView":284 + * self.name = name + * def __repr__(self): + * return self.name # <<<<<<<<<<<<<< + * + * cdef generic = Enum("") + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_self->name); + __pyx_r = __pyx_v_self->name; + goto __pyx_L0; + + /* "View.MemoryView":283 + * def __init__(self, name): + * self.name = name + * def __repr__(self): # <<<<<<<<<<<<<< + * return self.name + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * cdef tuple state + * cdef object _dict + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { + PyObject *__pyx_v_state = 0; + PyObject *__pyx_v__dict = 0; + int __pyx_v_use_setstate; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_t_2; + int __pyx_t_3; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__reduce_cython__", 
0); + + /* "(tree fragment)":5 + * cdef object _dict + * cdef bint use_setstate + * state = (self.name,) # <<<<<<<<<<<<<< + * _dict = getattr(self, '__dict__', None) + * if _dict is not None: + */ + __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(__pyx_v_self->name); + __Pyx_GIVEREF(__pyx_v_self->name); + PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name); + __pyx_v_state = ((PyObject*)__pyx_t_1); + __pyx_t_1 = 0; + + /* "(tree fragment)":6 + * cdef bint use_setstate + * state = (self.name,) + * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< + * if _dict is not None: + * state += (_dict,) + */ + __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v__dict = __pyx_t_1; + __pyx_t_1 = 0; + + /* "(tree fragment)":7 + * state = (self.name,) + * _dict = getattr(self, '__dict__', None) + * if _dict is not None: # <<<<<<<<<<<<<< + * state += (_dict,) + * use_setstate = True + */ + __pyx_t_2 = (__pyx_v__dict != Py_None); + __pyx_t_3 = (__pyx_t_2 != 0); + if (__pyx_t_3) { + + /* "(tree fragment)":8 + * _dict = getattr(self, '__dict__', None) + * if _dict is not None: + * state += (_dict,) # <<<<<<<<<<<<<< + * use_setstate = True + * else: + */ + __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(__pyx_v__dict); + __Pyx_GIVEREF(__pyx_v__dict); + PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); + __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4)); + __pyx_t_4 = 0; + + /* "(tree fragment)":9 + * if _dict is not None: + * state += (_dict,) + * use_setstate = True # <<<<<<<<<<<<<< + * else: + * use_setstate = self.name is not None + */ + __pyx_v_use_setstate = 1; + + /* "(tree fragment)":7 + * state = (self.name,) + * _dict = getattr(self, '__dict__', None) + * if _dict is not None: # <<<<<<<<<<<<<< + * state += (_dict,) + * use_setstate = True + */ + goto __pyx_L3; + } + + /* "(tree fragment)":11 + * use_setstate = True + * else: + * use_setstate = self.name is not None # <<<<<<<<<<<<<< + * if use_setstate: + * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state + */ + /*else*/ { + __pyx_t_3 = (__pyx_v_self->name != Py_None); + __pyx_v_use_setstate = __pyx_t_3; + } + __pyx_L3:; + + /* "(tree fragment)":12 + * else: + * use_setstate = self.name is not None + * if use_setstate: # <<<<<<<<<<<<<< + * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state + * else: + */ + __pyx_t_3 = (__pyx_v_use_setstate != 0); + if (__pyx_t_3) { + + /* "(tree fragment)":13 + * use_setstate = self.name is not None + * if use_setstate: + * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<< + * else: + * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); + __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); + 
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); + __Pyx_INCREF(__pyx_int_184977713); + __Pyx_GIVEREF(__pyx_int_184977713); + PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); + __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(Py_None); + PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); + __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); + __Pyx_INCREF(__pyx_v_state); + __Pyx_GIVEREF(__pyx_v_state); + PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state); + __pyx_t_4 = 0; + __pyx_t_1 = 0; + __pyx_r = __pyx_t_5; + __pyx_t_5 = 0; + goto __pyx_L0; + + /* "(tree fragment)":12 + * else: + * use_setstate = self.name is not None + * if use_setstate: # <<<<<<<<<<<<<< + * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state + * else: + */ + } + + /* "(tree fragment)":15 + * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state + * else: + * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * __pyx_unpickle_Enum__set_state(self, __pyx_state) + */ + /*else*/ { + __Pyx_XDECREF(__pyx_r); + __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); + __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); + PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); + __Pyx_INCREF(__pyx_int_184977713); + __Pyx_GIVEREF(__pyx_int_184977713); + PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); + __Pyx_INCREF(__pyx_v_state); + __Pyx_GIVEREF(__pyx_v_state); + PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); + __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_5); + PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); + __pyx_t_5 = 0; + __pyx_t_1 = 0; + __pyx_r = __pyx_t_4; + __pyx_t_4 = 0; + goto __pyx_L0; + } + + /* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * cdef tuple state + * cdef object _dict + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_state); + __Pyx_XDECREF(__pyx_v__dict); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":16 + * else: + * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * __pyx_unpickle_Enum__set_state(self, __pyx_state) + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ +static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setstate_cython__ 
(wrapper)", 0); + __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setstate_cython__", 0); + + /* "(tree fragment)":17 + * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) + * def __setstate_cython__(self, __pyx_state): + * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<< + */ + if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error) + __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "(tree fragment)":16 + * else: + * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * __pyx_unpickle_Enum__set_state(self, __pyx_state) + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":298 + * + * @cname('__pyx_align_pointer') + * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< + * "Align pointer memory on a given boundary" + * cdef Py_intptr_t aligned_p = memory + */ + +static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { + Py_intptr_t __pyx_v_aligned_p; + size_t __pyx_v_offset; + void *__pyx_r; + int __pyx_t_1; + + /* "View.MemoryView":300 + * cdef void *align_pointer(void *memory, size_t alignment) nogil: + * "Align pointer memory on a given boundary" + * cdef Py_intptr_t aligned_p = memory # <<<<<<<<<<<<<< + * cdef size_t offset + * + */ + __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); + + /* "View.MemoryView":304 + * + * with cython.cdivision(True): + * offset = aligned_p % alignment # <<<<<<<<<<<<<< + * + * if offset > 0: + */ + __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); + + /* "View.MemoryView":306 + * offset = aligned_p % alignment + * + * if offset > 0: # <<<<<<<<<<<<<< + * aligned_p += alignment - offset + * + */ + __pyx_t_1 = ((__pyx_v_offset > 0) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":307 + * + * if offset > 0: + * aligned_p += alignment - offset # <<<<<<<<<<<<<< + * + * return aligned_p + */ + __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); + + /* "View.MemoryView":306 + * offset = aligned_p % alignment + * + * if offset > 0: # <<<<<<<<<<<<<< + * aligned_p += alignment - offset + * + */ + } + + /* "View.MemoryView":309 + * aligned_p += alignment - offset + * + * return aligned_p # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = ((void *)__pyx_v_aligned_p); + goto __pyx_L0; + 
+ /* "View.MemoryView":298 + * + * @cname('__pyx_align_pointer') + * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< + * "Align pointer memory on a given boundary" + * cdef Py_intptr_t aligned_p = memory + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":345 + * cdef __Pyx_TypeInfo *typeinfo + * + * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< + * self.obj = obj + * self.flags = flags + */ + +/* Python wrapper */ +static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_obj = 0; + int __pyx_v_flags; + int __pyx_v_dtype_is_object; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; + PyObject* values[3] = {0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 345, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object); + if (value) { values[2] = value; kw_args--; } + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 345, __pyx_L3_error) + } + } else { + switch (PyTuple_GET_SIZE(__pyx_args)) { + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + break; + default: goto __pyx_L5_argtuple_error; + } + } + __pyx_v_obj = values[0]; + __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) + if (values[2]) { + __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) + } else { + __pyx_v_dtype_is_object = ((int)0); + } + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 345, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return -1; + __pyx_L4_argument_unpacking_done:; + __pyx_r = 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__cinit__", 0); + + /* "View.MemoryView":346 + * + * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): + * self.obj = obj # <<<<<<<<<<<<<< + * self.flags = flags + * if type(self) is memoryview or obj is not None: + */ + __Pyx_INCREF(__pyx_v_obj); + __Pyx_GIVEREF(__pyx_v_obj); + __Pyx_GOTREF(__pyx_v_self->obj); + __Pyx_DECREF(__pyx_v_self->obj); + __pyx_v_self->obj = __pyx_v_obj; + + /* "View.MemoryView":347 + * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): + * self.obj = obj + * self.flags = flags # <<<<<<<<<<<<<< + * if type(self) is memoryview or obj is not None: + * __Pyx_GetBuffer(obj, &self.view, flags) + */ + __pyx_v_self->flags = __pyx_v_flags; + + /* "View.MemoryView":348 + * self.obj = obj + * self.flags = flags + * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< + * __Pyx_GetBuffer(obj, &self.view, flags) + * if self.view.obj == NULL: + */ + __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); + __pyx_t_3 = (__pyx_t_2 != 0); + if (!__pyx_t_3) { + } else { + __pyx_t_1 = __pyx_t_3; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_3 = (__pyx_v_obj != Py_None); + __pyx_t_2 = (__pyx_t_3 != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L4_bool_binop_done:; + if (__pyx_t_1) { + + /* "View.MemoryView":349 + * self.flags = flags + * if type(self) is memoryview or obj is not None: + * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< + * if self.view.obj == NULL: + * (<__pyx_buffer *> &self.view).obj = Py_None + */ + __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 349, __pyx_L1_error) + + /* "View.MemoryView":350 + * if type(self) is memoryview or obj is not None: + * __Pyx_GetBuffer(obj, &self.view, flags) + * if self.view.obj == NULL: # <<<<<<<<<<<<<< + * (<__pyx_buffer *> &self.view).obj = Py_None + * Py_INCREF(Py_None) + */ + __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":351 + * __Pyx_GetBuffer(obj, &self.view, flags) + * if self.view.obj == NULL: + * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< + * Py_INCREF(Py_None) + * + */ + ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; + + /* "View.MemoryView":352 + * if self.view.obj == NULL: + * (<__pyx_buffer *> &self.view).obj = Py_None + * Py_INCREF(Py_None) # <<<<<<<<<<<<<< + * + * global __pyx_memoryview_thread_locks_used + */ + Py_INCREF(Py_None); + + /* "View.MemoryView":350 + * if type(self) is memoryview or obj is not None: + * __Pyx_GetBuffer(obj, &self.view, flags) + * if self.view.obj == NULL: # <<<<<<<<<<<<<< + * (<__pyx_buffer *> &self.view).obj = Py_None + * Py_INCREF(Py_None) + */ + } + + /* "View.MemoryView":348 + * self.obj = obj + * self.flags = flags + * 
if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< + * __Pyx_GetBuffer(obj, &self.view, flags) + * if self.view.obj == NULL: + */ + } + + /* "View.MemoryView":355 + * + * global __pyx_memoryview_thread_locks_used + * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] + * __pyx_memoryview_thread_locks_used += 1 + */ + __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":356 + * global __pyx_memoryview_thread_locks_used + * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks_used += 1 + * if self.lock is NULL: + */ + __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); + + /* "View.MemoryView":357 + * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] + * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< + * if self.lock is NULL: + * self.lock = PyThread_allocate_lock() + */ + __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); + + /* "View.MemoryView":355 + * + * global __pyx_memoryview_thread_locks_used + * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] + * __pyx_memoryview_thread_locks_used += 1 + */ + } + + /* "View.MemoryView":358 + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] + * __pyx_memoryview_thread_locks_used += 1 + * if self.lock is NULL: # <<<<<<<<<<<<<< + * self.lock = PyThread_allocate_lock() + * if self.lock is NULL: + */ + __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":359 + * __pyx_memoryview_thread_locks_used += 1 + * if self.lock is NULL: + * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< + * if self.lock is NULL: + * raise MemoryError + */ + __pyx_v_self->lock = PyThread_allocate_lock(); + + /* "View.MemoryView":360 + * if self.lock is NULL: + * self.lock = PyThread_allocate_lock() + * if self.lock is NULL: # <<<<<<<<<<<<<< + * raise MemoryError + * + */ + __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":361 + * self.lock = PyThread_allocate_lock() + * if self.lock is NULL: + * raise MemoryError # <<<<<<<<<<<<<< + * + * if flags & PyBUF_FORMAT: + */ + PyErr_NoMemory(); __PYX_ERR(1, 361, __pyx_L1_error) + + /* "View.MemoryView":360 + * if self.lock is NULL: + * self.lock = PyThread_allocate_lock() + * if self.lock is NULL: # <<<<<<<<<<<<<< + * raise MemoryError + * + */ + } + + /* "View.MemoryView":358 + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] + * __pyx_memoryview_thread_locks_used += 1 + * if self.lock is NULL: # <<<<<<<<<<<<<< + * self.lock = PyThread_allocate_lock() + * if self.lock is NULL: + */ + } + + /* "View.MemoryView":363 + * raise MemoryError + * + * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< + * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') + * else: + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":364 + * + * if flags & PyBUF_FORMAT: + * self.dtype_is_object = (self.view.format[0] == 
b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< + * else: + * self.dtype_is_object = dtype_is_object + */ + __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L11_bool_binop_done; + } + __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L11_bool_binop_done:; + __pyx_v_self->dtype_is_object = __pyx_t_1; + + /* "View.MemoryView":363 + * raise MemoryError + * + * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< + * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') + * else: + */ + goto __pyx_L10; + } + + /* "View.MemoryView":366 + * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') + * else: + * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< + * + * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( + */ + /*else*/ { + __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; + } + __pyx_L10:; + + /* "View.MemoryView":368 + * self.dtype_is_object = dtype_is_object + * + * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< + * &self.acquisition_count[0], sizeof(__pyx_atomic_int)) + * self.typeinfo = NULL + */ + __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); + + /* "View.MemoryView":370 + * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( + * &self.acquisition_count[0], sizeof(__pyx_atomic_int)) + * self.typeinfo = NULL # <<<<<<<<<<<<<< + * + * def __dealloc__(memoryview self): + */ + __pyx_v_self->typeinfo = NULL; + + /* "View.MemoryView":345 + * cdef __Pyx_TypeInfo *typeinfo + * + * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< + * self.obj = obj + * self.flags = flags + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":372 + * self.typeinfo = NULL + * + * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< + * if self.obj is not None: + * __Pyx_ReleaseBuffer(&self.view) + */ + +/* Python wrapper */ +static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ +static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); + __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { + int __pyx_v_i; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + int __pyx_t_5; + PyThread_type_lock __pyx_t_6; + PyThread_type_lock __pyx_t_7; + __Pyx_RefNannySetupContext("__dealloc__", 0); + + /* "View.MemoryView":373 + * + * def __dealloc__(memoryview self): + * if self.obj is not None: # <<<<<<<<<<<<<< + * __Pyx_ReleaseBuffer(&self.view) + * elif (<__pyx_buffer *> &self.view).obj == Py_None: + */ + __pyx_t_1 = (__pyx_v_self->obj != Py_None); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* 
"View.MemoryView":374 + * def __dealloc__(memoryview self): + * if self.obj is not None: + * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< + * elif (<__pyx_buffer *> &self.view).obj == Py_None: + * + */ + __Pyx_ReleaseBuffer((&__pyx_v_self->view)); + + /* "View.MemoryView":373 + * + * def __dealloc__(memoryview self): + * if self.obj is not None: # <<<<<<<<<<<<<< + * __Pyx_ReleaseBuffer(&self.view) + * elif (<__pyx_buffer *> &self.view).obj == Py_None: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":375 + * if self.obj is not None: + * __Pyx_ReleaseBuffer(&self.view) + * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< + * + * (<__pyx_buffer *> &self.view).obj = NULL + */ + __pyx_t_2 = ((((Py_buffer *)(&__pyx_v_self->view))->obj == Py_None) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":377 + * elif (<__pyx_buffer *> &self.view).obj == Py_None: + * + * (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<< + * Py_DECREF(Py_None) + * + */ + ((Py_buffer *)(&__pyx_v_self->view))->obj = NULL; + + /* "View.MemoryView":378 + * + * (<__pyx_buffer *> &self.view).obj = NULL + * Py_DECREF(Py_None) # <<<<<<<<<<<<<< + * + * cdef int i + */ + Py_DECREF(Py_None); + + /* "View.MemoryView":375 + * if self.obj is not None: + * __Pyx_ReleaseBuffer(&self.view) + * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< + * + * (<__pyx_buffer *> &self.view).obj = NULL + */ + } + __pyx_L3:; + + /* "View.MemoryView":382 + * cdef int i + * global __pyx_memoryview_thread_locks_used + * if self.lock != NULL: # <<<<<<<<<<<<<< + * for i in range(__pyx_memoryview_thread_locks_used): + * if __pyx_memoryview_thread_locks[i] is self.lock: + */ + __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":383 + * global __pyx_memoryview_thread_locks_used + * if self.lock != NULL: + * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< + * if __pyx_memoryview_thread_locks[i] is self.lock: + * __pyx_memoryview_thread_locks_used -= 1 + */ + __pyx_t_3 = __pyx_memoryview_thread_locks_used; + __pyx_t_4 = __pyx_t_3; + for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { + __pyx_v_i = __pyx_t_5; + + /* "View.MemoryView":384 + * if self.lock != NULL: + * for i in range(__pyx_memoryview_thread_locks_used): + * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks_used -= 1 + * if i != __pyx_memoryview_thread_locks_used: + */ + __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":385 + * for i in range(__pyx_memoryview_thread_locks_used): + * if __pyx_memoryview_thread_locks[i] is self.lock: + * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< + * if i != __pyx_memoryview_thread_locks_used: + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( + */ + __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1); + + /* "View.MemoryView":386 + * if __pyx_memoryview_thread_locks[i] is self.lock: + * __pyx_memoryview_thread_locks_used -= 1 + * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( + * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) + */ + __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":388 + 
* if i != __pyx_memoryview_thread_locks_used: + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( + * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< + * break + * else: + */ + __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); + __pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]); + + /* "View.MemoryView":387 + * __pyx_memoryview_thread_locks_used -= 1 + * if i != __pyx_memoryview_thread_locks_used: + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) + * break + */ + (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6; + (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7; + + /* "View.MemoryView":386 + * if __pyx_memoryview_thread_locks[i] is self.lock: + * __pyx_memoryview_thread_locks_used -= 1 + * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( + * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) + */ + } + + /* "View.MemoryView":389 + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( + * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) + * break # <<<<<<<<<<<<<< + * else: + * PyThread_free_lock(self.lock) + */ + goto __pyx_L6_break; + + /* "View.MemoryView":384 + * if self.lock != NULL: + * for i in range(__pyx_memoryview_thread_locks_used): + * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks_used -= 1 + * if i != __pyx_memoryview_thread_locks_used: + */ + } + } + /*else*/ { + + /* "View.MemoryView":391 + * break + * else: + * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< + * + * cdef char *get_item_pointer(memoryview self, object index) except NULL: + */ + PyThread_free_lock(__pyx_v_self->lock); + } + __pyx_L6_break:; + + /* "View.MemoryView":382 + * cdef int i + * global __pyx_memoryview_thread_locks_used + * if self.lock != NULL: # <<<<<<<<<<<<<< + * for i in range(__pyx_memoryview_thread_locks_used): + * if __pyx_memoryview_thread_locks[i] is self.lock: + */ + } + + /* "View.MemoryView":372 + * self.typeinfo = NULL + * + * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< + * if self.obj is not None: + * __Pyx_ReleaseBuffer(&self.view) + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "View.MemoryView":393 + * PyThread_free_lock(self.lock) + * + * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< + * cdef Py_ssize_t dim + * cdef char *itemp = self.view.buf + */ + +static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { + Py_ssize_t __pyx_v_dim; + char *__pyx_v_itemp; + PyObject *__pyx_v_idx = NULL; + char *__pyx_r; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + Py_ssize_t __pyx_t_3; + PyObject *(*__pyx_t_4)(PyObject *); + PyObject *__pyx_t_5 = NULL; + Py_ssize_t __pyx_t_6; + char *__pyx_t_7; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("get_item_pointer", 0); 
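+  /* get_item_pointer resolves a full (already un-ellipsified) index tuple:
+   * it starts from view.buf and applies pybuffer_index once per dimension,
+   * accumulating the byte offset until itemp addresses the target element. */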
+ + /* "View.MemoryView":395 + * cdef char *get_item_pointer(memoryview self, object index) except NULL: + * cdef Py_ssize_t dim + * cdef char *itemp = self.view.buf # <<<<<<<<<<<<<< + * + * for dim, idx in enumerate(index): + */ + __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); + + /* "View.MemoryView":397 + * cdef char *itemp = self.view.buf + * + * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< + * itemp = pybuffer_index(&self.view, itemp, idx, dim) + * + */ + __pyx_t_1 = 0; + if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { + __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; + __pyx_t_4 = NULL; + } else { + __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 397, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 397, __pyx_L1_error) + } + for (;;) { + if (likely(!__pyx_t_4)) { + if (likely(PyList_CheckExact(__pyx_t_2))) { + if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) + #else + __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + #endif + } else { + if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) + #else + __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + #endif + } + } else { + __pyx_t_5 = __pyx_t_4(__pyx_t_2); + if (unlikely(!__pyx_t_5)) { + PyObject* exc_type = PyErr_Occurred(); + if (exc_type) { + if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); + else __PYX_ERR(1, 397, __pyx_L1_error) + } + break; + } + __Pyx_GOTREF(__pyx_t_5); + } + __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); + __pyx_t_5 = 0; + __pyx_v_dim = __pyx_t_1; + __pyx_t_1 = (__pyx_t_1 + 1); + + /* "View.MemoryView":398 + * + * for dim, idx in enumerate(index): + * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< + * + * return itemp + */ + __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 398, __pyx_L1_error) + __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 398, __pyx_L1_error) + __pyx_v_itemp = __pyx_t_7; + + /* "View.MemoryView":397 + * cdef char *itemp = self.view.buf + * + * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< + * itemp = pybuffer_index(&self.view, itemp, idx, dim) + * + */ + } + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "View.MemoryView":400 + * itemp = pybuffer_index(&self.view, itemp, idx, dim) + * + * return itemp # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = __pyx_v_itemp; + goto __pyx_L0; + + /* "View.MemoryView":393 + * PyThread_free_lock(self.lock) + * + * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< + * cdef Py_ssize_t dim + * cdef char *itemp = self.view.buf + */ + + /* function exit code */ + 
__pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_idx); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":403 + * + * + * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< + * if index is Ellipsis: + * return self + */ + +/* Python wrapper */ +static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ +static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { + PyObject *__pyx_v_have_slices = NULL; + PyObject *__pyx_v_indices = NULL; + char *__pyx_v_itemp; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + char *__pyx_t_6; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__getitem__", 0); + + /* "View.MemoryView":404 + * + * def __getitem__(memoryview self, object index): + * if index is Ellipsis: # <<<<<<<<<<<<<< + * return self + * + */ + __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":405 + * def __getitem__(memoryview self, object index): + * if index is Ellipsis: + * return self # <<<<<<<<<<<<<< + * + * have_slices, indices = _unellipsify(index, self.view.ndim) + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_self)); + __pyx_r = ((PyObject *)__pyx_v_self); + goto __pyx_L0; + + /* "View.MemoryView":404 + * + * def __getitem__(memoryview self, object index): + * if index is Ellipsis: # <<<<<<<<<<<<<< + * return self + * + */ + } + + /* "View.MemoryView":407 + * return self + * + * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< + * + * cdef char *itemp + */ + __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 407, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + if (likely(__pyx_t_3 != Py_None)) { + PyObject* sequence = __pyx_t_3; + Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); + if (unlikely(size != 2)) { + if (size > 2) __Pyx_RaiseTooManyValuesError(2); + else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); + __PYX_ERR(1, 407, __pyx_L1_error) + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); + __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); + __Pyx_INCREF(__pyx_t_4); + __Pyx_INCREF(__pyx_t_5); + #else + __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 407, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 407, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + #endif + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } else { + 
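+    /* _unellipsify returns a (have_slices, indices) 2-tuple on success; a
+     * None result is rejected with the standard "not iterable" error before
+     * the pair is unpacked above. */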
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 407, __pyx_L1_error) + } + __pyx_v_have_slices = __pyx_t_4; + __pyx_t_4 = 0; + __pyx_v_indices = __pyx_t_5; + __pyx_t_5 = 0; + + /* "View.MemoryView":410 + * + * cdef char *itemp + * if have_slices: # <<<<<<<<<<<<<< + * return memview_slice(self, indices) + * else: + */ + __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 410, __pyx_L1_error) + if (__pyx_t_2) { + + /* "View.MemoryView":411 + * cdef char *itemp + * if have_slices: + * return memview_slice(self, indices) # <<<<<<<<<<<<<< + * else: + * itemp = self.get_item_pointer(indices) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "View.MemoryView":410 + * + * cdef char *itemp + * if have_slices: # <<<<<<<<<<<<<< + * return memview_slice(self, indices) + * else: + */ + } + + /* "View.MemoryView":413 + * return memview_slice(self, indices) + * else: + * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< + * return self.convert_item_to_object(itemp) + * + */ + /*else*/ { + __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 413, __pyx_L1_error) + __pyx_v_itemp = __pyx_t_6; + + /* "View.MemoryView":414 + * else: + * itemp = self.get_item_pointer(indices) + * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< + * + * def __setitem__(memoryview self, object index, object value): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 414, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + } + + /* "View.MemoryView":403 + * + * + * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< + * if index is Ellipsis: + * return self + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_have_slices); + __Pyx_XDECREF(__pyx_v_indices); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":416 + * return self.convert_item_to_object(itemp) + * + * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< + * if self.view.readonly: + * raise TypeError("Cannot assign to read-only memoryview") + */ + +/* Python wrapper */ +static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ +static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { + PyObject *__pyx_v_have_slices = NULL; + PyObject *__pyx_v_obj = NULL; + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setitem__", 0); + __Pyx_INCREF(__pyx_v_index); + + /* "View.MemoryView":417 + * + * def __setitem__(memoryview self, object index, object value): + * if self.view.readonly: # <<<<<<<<<<<<<< + * raise TypeError("Cannot assign to read-only memoryview") + * + */ + __pyx_t_1 = (__pyx_v_self->view.readonly != 0); + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":418 + * def __setitem__(memoryview self, object index, object value): + * if self.view.readonly: + * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< + * + * have_slices, index = _unellipsify(index, self.view.ndim) + */ + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 418, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(1, 418, __pyx_L1_error) + + /* "View.MemoryView":417 + * + * def __setitem__(memoryview self, object index, object value): + * if self.view.readonly: # <<<<<<<<<<<<<< + * raise TypeError("Cannot assign to read-only memoryview") + * + */ + } + + /* "View.MemoryView":420 + * raise TypeError("Cannot assign to read-only memoryview") + * + * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< + * + * if have_slices: + */ + __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 420, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (likely(__pyx_t_2 != Py_None)) { + PyObject* sequence = __pyx_t_2; + Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); + if (unlikely(size != 2)) { + if (size > 2) __Pyx_RaiseTooManyValuesError(2); + else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); + __PYX_ERR(1, 420, __pyx_L1_error) + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); + __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(__pyx_t_4); + #else + __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 420, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 420, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + #endif + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } else { + __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 420, __pyx_L1_error) + } + __pyx_v_have_slices = __pyx_t_3; + __pyx_t_3 = 0; + __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4); + __pyx_t_4 = 0; + + /* "View.MemoryView":422 + * have_slices, index = _unellipsify(index, self.view.ndim) + * + * if have_slices: # <<<<<<<<<<<<<< + * obj = self.is_slice(value) + * if obj: + */ + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 422, __pyx_L1_error) + if (__pyx_t_1) { + + /* "View.MemoryView":423 + * + * if have_slices: + * obj = self.is_slice(value) # <<<<<<<<<<<<<< + * if obj: + * self.setitem_slice_assignment(self[index], obj) + */ + __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview 
*)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 423, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_v_obj = __pyx_t_2; + __pyx_t_2 = 0; + + /* "View.MemoryView":424 + * if have_slices: + * obj = self.is_slice(value) + * if obj: # <<<<<<<<<<<<<< + * self.setitem_slice_assignment(self[index], obj) + * else: + */ + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 424, __pyx_L1_error) + if (__pyx_t_1) { + + /* "View.MemoryView":425 + * obj = self.is_slice(value) + * if obj: + * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< + * else: + * self.setitem_slice_assign_scalar(self[index], value) + */ + __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 425, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "View.MemoryView":424 + * if have_slices: + * obj = self.is_slice(value) + * if obj: # <<<<<<<<<<<<<< + * self.setitem_slice_assignment(self[index], obj) + * else: + */ + goto __pyx_L5; + } + + /* "View.MemoryView":427 + * self.setitem_slice_assignment(self[index], obj) + * else: + * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< + * else: + * self.setitem_indexed(index, value) + */ + /*else*/ { + __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 427, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 427, __pyx_L1_error) + __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 427, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } + __pyx_L5:; + + /* "View.MemoryView":422 + * have_slices, index = _unellipsify(index, self.view.ndim) + * + * if have_slices: # <<<<<<<<<<<<<< + * obj = self.is_slice(value) + * if obj: + */ + goto __pyx_L4; + } + + /* "View.MemoryView":429 + * self.setitem_slice_assign_scalar(self[index], value) + * else: + * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< + * + * cdef is_slice(self, obj): + */ + /*else*/ { + __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 429, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } + __pyx_L4:; + + /* "View.MemoryView":416 + * return self.convert_item_to_object(itemp) + * + * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< + * if self.view.readonly: + * raise TypeError("Cannot assign to read-only memoryview") + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; 
+ __Pyx_XDECREF(__pyx_v_have_slices); + __Pyx_XDECREF(__pyx_v_obj); + __Pyx_XDECREF(__pyx_v_index); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":431 + * self.setitem_indexed(index, value) + * + * cdef is_slice(self, obj): # <<<<<<<<<<<<<< + * if not isinstance(obj, memoryview): + * try: + */ + +static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + int __pyx_t_9; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("is_slice", 0); + __Pyx_INCREF(__pyx_v_obj); + + /* "View.MemoryView":432 + * + * cdef is_slice(self, obj): + * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< + * try: + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + */ + __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); + __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":433 + * cdef is_slice(self, obj): + * if not isinstance(obj, memoryview): + * try: # <<<<<<<<<<<<<< + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + * self.dtype_is_object) + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); + __Pyx_XGOTREF(__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_4); + __Pyx_XGOTREF(__pyx_t_5); + /*try:*/ { + + /* "View.MemoryView":434 + * if not isinstance(obj, memoryview): + * try: + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< + * self.dtype_is_object) + * except TypeError: + */ + __pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 434, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_6); + + /* "View.MemoryView":435 + * try: + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + * self.dtype_is_object) # <<<<<<<<<<<<<< + * except TypeError: + * return None + */ + __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 435, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_7); + + /* "View.MemoryView":434 + * if not isinstance(obj, memoryview): + * try: + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< + * self.dtype_is_object) + * except TypeError: + */ + __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 434, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_INCREF(__pyx_v_obj); + __Pyx_GIVEREF(__pyx_v_obj); + PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); + __Pyx_GIVEREF(__pyx_t_7); + PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); + __pyx_t_6 = 0; + __pyx_t_7 = 0; + __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 434, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); + __pyx_t_7 = 0; + + /* "View.MemoryView":433 + * cdef is_slice(self, obj): + * if not isinstance(obj, memoryview): + * try: # <<<<<<<<<<<<<< + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | 
PyBUF_ANY_CONTIGUOUS, + * self.dtype_is_object) + */ + } + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + goto __pyx_L9_try_end; + __pyx_L4_error:; + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; + + /* "View.MemoryView":436 + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + * self.dtype_is_object) + * except TypeError: # <<<<<<<<<<<<<< + * return None + * + */ + __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); + if (__pyx_t_9) { + __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 436, __pyx_L6_except_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_GOTREF(__pyx_t_8); + __Pyx_GOTREF(__pyx_t_6); + + /* "View.MemoryView":437 + * self.dtype_is_object) + * except TypeError: + * return None # <<<<<<<<<<<<<< + * + * return obj + */ + __Pyx_XDECREF(__pyx_r); + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + goto __pyx_L7_except_return; + } + goto __pyx_L6_except_error; + __pyx_L6_except_error:; + + /* "View.MemoryView":433 + * cdef is_slice(self, obj): + * if not isinstance(obj, memoryview): + * try: # <<<<<<<<<<<<<< + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + * self.dtype_is_object) + */ + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_XGIVEREF(__pyx_t_5); + __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); + goto __pyx_L1_error; + __pyx_L7_except_return:; + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_XGIVEREF(__pyx_t_5); + __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); + goto __pyx_L0; + __pyx_L9_try_end:; + } + + /* "View.MemoryView":432 + * + * cdef is_slice(self, obj): + * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< + * try: + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + */ + } + + /* "View.MemoryView":439 + * return None + * + * return obj # <<<<<<<<<<<<<< + * + * cdef setitem_slice_assignment(self, dst, src): + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_obj); + __pyx_r = __pyx_v_obj; + goto __pyx_L0; + + /* "View.MemoryView":431 + * self.setitem_indexed(index, value) + * + * cdef is_slice(self, obj): # <<<<<<<<<<<<<< + * if not isinstance(obj, memoryview): + * try: + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_obj); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":441 + * return obj + * + * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice dst_slice + * cdef __Pyx_memviewslice src_slice + */ + +static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { + __Pyx_memviewslice __pyx_v_dst_slice; + __Pyx_memviewslice __pyx_v_src_slice; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_memviewslice *__pyx_t_1; + __Pyx_memviewslice *__pyx_t_2; + PyObject *__pyx_t_3 
= NULL; + int __pyx_t_4; + int __pyx_t_5; + int __pyx_t_6; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); + + /* "View.MemoryView":445 + * cdef __Pyx_memviewslice src_slice + * + * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< + * get_slice_from_memview(dst, &dst_slice)[0], + * src.ndim, dst.ndim, self.dtype_is_object) + */ + if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 445, __pyx_L1_error) + __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 445, __pyx_L1_error) + + /* "View.MemoryView":446 + * + * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], + * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< + * src.ndim, dst.ndim, self.dtype_is_object) + * + */ + if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 446, __pyx_L1_error) + __pyx_t_2 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 446, __pyx_L1_error) + + /* "View.MemoryView":447 + * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], + * get_slice_from_memview(dst, &dst_slice)[0], + * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< + * + * cdef setitem_slice_assign_scalar(self, memoryview dst, value): + */ + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "View.MemoryView":445 + * cdef __Pyx_memviewslice src_slice + * + * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< + * get_slice_from_memview(dst, &dst_slice)[0], + * src.ndim, dst.ndim, self.dtype_is_object) + */ + __pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 445, __pyx_L1_error) + + /* "View.MemoryView":441 + * return obj + * + * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice dst_slice + * cdef __Pyx_memviewslice src_slice + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":449 + * src.ndim, dst.ndim, self.dtype_is_object) + * + * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # 
<<<<<<<<<<<<<< + * cdef int array[128] + * cdef void *tmp = NULL + */ + +static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { + int __pyx_v_array[0x80]; + void *__pyx_v_tmp; + void *__pyx_v_item; + __Pyx_memviewslice *__pyx_v_dst_slice; + __Pyx_memviewslice __pyx_v_tmp_slice; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_memviewslice *__pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + int __pyx_t_5; + char const *__pyx_t_6; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + PyObject *__pyx_t_9 = NULL; + PyObject *__pyx_t_10 = NULL; + PyObject *__pyx_t_11 = NULL; + PyObject *__pyx_t_12 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); + + /* "View.MemoryView":451 + * cdef setitem_slice_assign_scalar(self, memoryview dst, value): + * cdef int array[128] + * cdef void *tmp = NULL # <<<<<<<<<<<<<< + * cdef void *item + * + */ + __pyx_v_tmp = NULL; + + /* "View.MemoryView":456 + * cdef __Pyx_memviewslice *dst_slice + * cdef __Pyx_memviewslice tmp_slice + * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< + * + * if self.view.itemsize > sizeof(array): + */ + __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 456, __pyx_L1_error) + __pyx_v_dst_slice = __pyx_t_1; + + /* "View.MemoryView":458 + * dst_slice = get_slice_from_memview(dst, &tmp_slice) + * + * if self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< + * tmp = PyMem_Malloc(self.view.itemsize) + * if tmp == NULL: + */ + __pyx_t_2 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":459 + * + * if self.view.itemsize > sizeof(array): + * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< + * if tmp == NULL: + * raise MemoryError + */ + __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); + + /* "View.MemoryView":460 + * if self.view.itemsize > sizeof(array): + * tmp = PyMem_Malloc(self.view.itemsize) + * if tmp == NULL: # <<<<<<<<<<<<<< + * raise MemoryError + * item = tmp + */ + __pyx_t_2 = ((__pyx_v_tmp == NULL) != 0); + if (unlikely(__pyx_t_2)) { + + /* "View.MemoryView":461 + * tmp = PyMem_Malloc(self.view.itemsize) + * if tmp == NULL: + * raise MemoryError # <<<<<<<<<<<<<< + * item = tmp + * else: + */ + PyErr_NoMemory(); __PYX_ERR(1, 461, __pyx_L1_error) + + /* "View.MemoryView":460 + * if self.view.itemsize > sizeof(array): + * tmp = PyMem_Malloc(self.view.itemsize) + * if tmp == NULL: # <<<<<<<<<<<<<< + * raise MemoryError + * item = tmp + */ + } + + /* "View.MemoryView":462 + * if tmp == NULL: + * raise MemoryError + * item = tmp # <<<<<<<<<<<<<< + * else: + * item = array + */ + __pyx_v_item = __pyx_v_tmp; + + /* "View.MemoryView":458 + * dst_slice = get_slice_from_memview(dst, &tmp_slice) + * + * if self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< + * tmp = PyMem_Malloc(self.view.itemsize) + * if tmp == NULL: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":464 + * item = tmp + * else: + * item = array # <<<<<<<<<<<<<< + * + * try: + */ + /*else*/ { + __pyx_v_item = ((void *)__pyx_v_array); + } + __pyx_L3:; + + /* "View.MemoryView":466 + * item = array + * + * try: # <<<<<<<<<<<<<< + * if self.dtype_is_object: + * (<PyObject **> item)[0] = value + */ + 
/*try:*/ { + + /* "View.MemoryView":467 + * + * try: + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * (<PyObject **> item)[0] = value + * else: + */ + __pyx_t_2 = (__pyx_v_self->dtype_is_object != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":468 + * try: + * if self.dtype_is_object: + * (<PyObject **> item)[0] = value # <<<<<<<<<<<<<< + * else: + * self.assign_item_from_object(<char *> item, value) + */ + (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); + + /* "View.MemoryView":467 + * + * try: + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * (<PyObject **> item)[0] = value + * else: + */ + goto __pyx_L8; + } + + /* "View.MemoryView":470 + * (<PyObject **> item)[0] = value + * else: + * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< + * + * + */ + /*else*/ { + __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 470, __pyx_L6_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __pyx_L8:; + + /* "View.MemoryView":474 + * + * + * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< + * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) + * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, + */ + __pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":475 + * + * if self.view.suboffsets != NULL: + * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< + * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, + * item, self.dtype_is_object) + */ + __pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 475, __pyx_L6_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "View.MemoryView":474 + * + * + * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< + * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) + * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, + */ + } + + /* "View.MemoryView":476 + * if self.view.suboffsets != NULL: + * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) + * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< + * item, self.dtype_is_object) + * finally: + */ + __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); + } + + /* "View.MemoryView":479 + * item, self.dtype_is_object) + * finally: + * PyMem_Free(tmp) # <<<<<<<<<<<<<< + * + * cdef setitem_indexed(self, index, value): + */ + /*finally:*/ { + /*normal exit:*/{ + PyMem_Free(__pyx_v_tmp); + goto __pyx_L7; + } + __pyx_L6_error:; + /*exception exit:*/{ + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); + if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9); + __Pyx_XGOTREF(__pyx_t_7); + __Pyx_XGOTREF(__pyx_t_8); + __Pyx_XGOTREF(__pyx_t_9); + __Pyx_XGOTREF(__pyx_t_10); + __Pyx_XGOTREF(__pyx_t_11); + __Pyx_XGOTREF(__pyx_t_12); + __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename; + { + PyMem_Free(__pyx_v_tmp); + } + if 
(PY_MAJOR_VERSION >= 3) { + __Pyx_XGIVEREF(__pyx_t_10); + __Pyx_XGIVEREF(__pyx_t_11); + __Pyx_XGIVEREF(__pyx_t_12); + __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12); + } + __Pyx_XGIVEREF(__pyx_t_7); + __Pyx_XGIVEREF(__pyx_t_8); + __Pyx_XGIVEREF(__pyx_t_9); + __Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9); + __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; + __pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6; + goto __pyx_L1_error; + } + __pyx_L7:; + } + + /* "View.MemoryView":449 + * src.ndim, dst.ndim, self.dtype_is_object) + * + * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< + * cdef int array[128] + * cdef void *tmp = NULL + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":481 + * PyMem_Free(tmp) + * + * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< + * cdef char *itemp = self.get_item_pointer(index) + * self.assign_item_from_object(itemp, value) + */ + +static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { + char *__pyx_v_itemp; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + char *__pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("setitem_indexed", 0); + + /* "View.MemoryView":482 + * + * cdef setitem_indexed(self, index, value): + * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< + * self.assign_item_from_object(itemp, value) + * + */ + __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 482, __pyx_L1_error) + __pyx_v_itemp = __pyx_t_1; + + /* "View.MemoryView":483 + * cdef setitem_indexed(self, index, value): + * cdef char *itemp = self.get_item_pointer(index) + * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< + * + * cdef convert_item_to_object(self, char *itemp): + */ + __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 483, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "View.MemoryView":481 + * PyMem_Free(tmp) + * + * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< + * cdef char *itemp = self.get_item_pointer(index) + * self.assign_item_from_object(itemp, value) + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":485 + * self.assign_item_from_object(itemp, value) + * + * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to 
convert the type""" + */ + +static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { + PyObject *__pyx_v_struct = NULL; + PyObject *__pyx_v_bytesitem = 0; + PyObject *__pyx_v_result = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + int __pyx_t_8; + PyObject *__pyx_t_9 = NULL; + size_t __pyx_t_10; + int __pyx_t_11; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("convert_item_to_object", 0); + + /* "View.MemoryView":488 + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to convert the type""" + * import struct # <<<<<<<<<<<<<< + * cdef bytes bytesitem + * + */ + __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 488, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_struct = __pyx_t_1; + __pyx_t_1 = 0; + + /* "View.MemoryView":491 + * cdef bytes bytesitem + * + * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< + * try: + * result = struct.unpack(self.view.format, bytesitem) + */ + __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 491, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); + __pyx_t_1 = 0; + + /* "View.MemoryView":492 + * + * bytesitem = itemp[:self.view.itemsize] + * try: # <<<<<<<<<<<<<< + * result = struct.unpack(self.view.format, bytesitem) + * except struct.error: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_4); + /*try:*/ { + + /* "View.MemoryView":493 + * bytesitem = itemp[:self.view.itemsize] + * try: + * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< + * except struct.error: + * raise ValueError("Unable to convert item to object") + */ + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 493, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 493, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_7 = NULL; + __pyx_t_8 = 0; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { + __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); + if (likely(__pyx_t_7)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); + __Pyx_INCREF(__pyx_t_7); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_5, function); + __pyx_t_8 = 1; + } + } + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(__pyx_t_5)) { + PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + } else + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { + PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + } else + #endif + { + __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 493, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_9); + if (__pyx_t_7) { + __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; + } + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); + __Pyx_INCREF(__pyx_v_bytesitem); + __Pyx_GIVEREF(__pyx_v_bytesitem); + PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); + __pyx_t_6 = 0; + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + } + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_v_result = __pyx_t_1; + __pyx_t_1 = 0; + + /* "View.MemoryView":492 + * + * bytesitem = itemp[:self.view.itemsize] + * try: # <<<<<<<<<<<<<< + * result = struct.unpack(self.view.format, bytesitem) + * except struct.error: + */ + } + + /* "View.MemoryView":497 + * raise ValueError("Unable to convert item to object") + * else: + * if len(self.view.format) == 1: # <<<<<<<<<<<<<< + * return result[0] + * return result + */ + /*else:*/ { + __pyx_t_10 = strlen(__pyx_v_self->view.format); + __pyx_t_11 = ((__pyx_t_10 == 1) != 0); + if (__pyx_t_11) { + + /* "View.MemoryView":498 + * else: + * if len(self.view.format) == 1: + * return result[0] # <<<<<<<<<<<<<< + * return result + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 498, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L6_except_return; + + /* "View.MemoryView":497 + * raise ValueError("Unable to convert item to object") + * else: + * if len(self.view.format) == 1: # <<<<<<<<<<<<<< + * return result[0] + * return result + */ + } + + /* "View.MemoryView":499 + * if len(self.view.format) == 1: + * return result[0] + * return result # <<<<<<<<<<<<<< + * + * cdef assign_item_from_object(self, char *itemp, object value): + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_result); + __pyx_r = __pyx_v_result; + goto __pyx_L6_except_return; + } + __pyx_L3_error:; + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; + + /* "View.MemoryView":494 + * try: + * result = struct.unpack(self.view.format, bytesitem) + * except struct.error: # <<<<<<<<<<<<<< + * raise ValueError("Unable to convert item to object") + * else: + */ + __Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9); + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 494, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9); + __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0; + if (__pyx_t_8) { + __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 494, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_9); + 
__Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_1); + + /* "View.MemoryView":495 + * result = struct.unpack(self.view.format, bytesitem) + * except struct.error: + * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< + * else: + * if len(self.view.format) == 1: + */ + __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 495, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_Raise(__pyx_t_6, 0, 0, 0); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __PYX_ERR(1, 495, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "View.MemoryView":492 + * + * bytesitem = itemp[:self.view.itemsize] + * try: # <<<<<<<<<<<<<< + * result = struct.unpack(self.view.format, bytesitem) + * except struct.error: + */ + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L1_error; + __pyx_L6_except_return:; + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L0; + } + + /* "View.MemoryView":485 + * self.assign_item_from_object(itemp, value) + * + * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to convert the type""" + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_9); + __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_struct); + __Pyx_XDECREF(__pyx_v_bytesitem); + __Pyx_XDECREF(__pyx_v_result); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":501 + * return result + * + * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to convert the type""" + */ + +static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { + PyObject *__pyx_v_struct = NULL; + char __pyx_v_c; + PyObject *__pyx_v_bytesvalue = 0; + Py_ssize_t __pyx_v_i; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_t_2; + int __pyx_t_3; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + int __pyx_t_7; + PyObject *__pyx_t_8 = NULL; + Py_ssize_t __pyx_t_9; + PyObject *__pyx_t_10 = NULL; + char *__pyx_t_11; + char *__pyx_t_12; + char *__pyx_t_13; + char *__pyx_t_14; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("assign_item_from_object", 0); + + /* "View.MemoryView":504 + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to convert the type""" + * import struct # <<<<<<<<<<<<<< + * cdef char c + * cdef bytes bytesvalue + */ + __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 504, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_struct = __pyx_t_1; + __pyx_t_1 = 0; + + /* "View.MemoryView":509 + * cdef Py_ssize_t i + * + * if isinstance(value, tuple): # <<<<<<<<<<<<<< + * 
bytesvalue = struct.pack(self.view.format, *value) + * else: + */ + __pyx_t_2 = PyTuple_Check(__pyx_v_value); + __pyx_t_3 = (__pyx_t_2 != 0); + if (__pyx_t_3) { + + /* "View.MemoryView":510 + * + * if isinstance(value, tuple): + * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< + * else: + * bytesvalue = struct.pack(self.view.format, value) + */ + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 510, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 510, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); + __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 510, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 510, __pyx_L1_error) + __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); + __pyx_t_4 = 0; + + /* "View.MemoryView":509 + * cdef Py_ssize_t i + * + * if isinstance(value, tuple): # <<<<<<<<<<<<<< + * bytesvalue = struct.pack(self.view.format, *value) + * else: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":512 + * bytesvalue = struct.pack(self.view.format, *value) + * else: + * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< + * + * for i, c in enumerate(bytesvalue): + */ + /*else*/ { + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 512, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 512, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_5 = NULL; + __pyx_t_7 = 0; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { + __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); + if (likely(__pyx_t_5)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); + __Pyx_INCREF(__pyx_t_5); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_6, function); + __pyx_t_7 = 1; + } + } + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(__pyx_t_6)) { + PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; + __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + } else + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { + PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; + __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) + 
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + } else + #endif + { + __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 512, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + if (__pyx_t_5) { + __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; + } + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); + __Pyx_INCREF(__pyx_v_value); + __Pyx_GIVEREF(__pyx_v_value); + PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); + __pyx_t_1 = 0; + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + } + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 512, __pyx_L1_error) + __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); + __pyx_t_4 = 0; + } + __pyx_L3:; + + /* "View.MemoryView":514 + * bytesvalue = struct.pack(self.view.format, value) + * + * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< + * itemp[i] = c + * + */ + __pyx_t_9 = 0; + if (unlikely(__pyx_v_bytesvalue == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); + __PYX_ERR(1, 514, __pyx_L1_error) + } + __Pyx_INCREF(__pyx_v_bytesvalue); + __pyx_t_10 = __pyx_v_bytesvalue; + __pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10); + __pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10)); + for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) { + __pyx_t_11 = __pyx_t_14; + __pyx_v_c = (__pyx_t_11[0]); + + /* "View.MemoryView":515 + * + * for i, c in enumerate(bytesvalue): + * itemp[i] = c # <<<<<<<<<<<<<< + * + * @cname('getbuffer') + */ + __pyx_v_i = __pyx_t_9; + + /* "View.MemoryView":514 + * bytesvalue = struct.pack(self.view.format, value) + * + * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< + * itemp[i] = c + * + */ + __pyx_t_9 = (__pyx_t_9 + 1); + + /* "View.MemoryView":515 + * + * for i, c in enumerate(bytesvalue): + * itemp[i] = c # <<<<<<<<<<<<<< + * + * @cname('getbuffer') + */ + (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; + } + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + + /* "View.MemoryView":501 + * return result + * + * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to convert the type""" + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_XDECREF(__pyx_t_10); + __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_struct); + __Pyx_XDECREF(__pyx_v_bytesvalue); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":518 + * + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< + * if flags & PyBUF_WRITABLE and self.view.readonly: + * raise ValueError("Cannot create writable memory view from read-only memoryview") + */ + +/* Python wrapper */ +static CYTHON_UNUSED int 
__pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ +static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + Py_ssize_t *__pyx_t_4; + char *__pyx_t_5; + void *__pyx_t_6; + int __pyx_t_7; + Py_ssize_t __pyx_t_8; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + if (__pyx_v_info == NULL) { + PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); + return -1; + } + __Pyx_RefNannySetupContext("__getbuffer__", 0); + __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(__pyx_v_info->obj); + + /* "View.MemoryView":519 + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): + * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< + * raise ValueError("Cannot create writable memory view from read-only memoryview") + * + */ + __pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_2 = (__pyx_v_self->view.readonly != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L4_bool_binop_done:; + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":520 + * def __getbuffer__(self, Py_buffer *info, int flags): + * if flags & PyBUF_WRITABLE and self.view.readonly: + * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< + * + * if flags & PyBUF_ND: + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 520, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 520, __pyx_L1_error) + + /* "View.MemoryView":519 + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): + * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< + * raise ValueError("Cannot create writable memory view from read-only memoryview") + * + */ + } + + /* "View.MemoryView":522 + * raise ValueError("Cannot create writable memory view from read-only memoryview") + * + * if flags & PyBUF_ND: # <<<<<<<<<<<<<< + * info.shape = self.view.shape + * else: + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":523 + * + * if flags & PyBUF_ND: + * info.shape = self.view.shape # <<<<<<<<<<<<<< + * else: + * info.shape = NULL + */ + __pyx_t_4 = __pyx_v_self->view.shape; + __pyx_v_info->shape = __pyx_t_4; + + /* "View.MemoryView":522 + * raise ValueError("Cannot create writable memory view from read-only memoryview") + * + * if flags & PyBUF_ND: # <<<<<<<<<<<<<< + * info.shape = self.view.shape + * else: + */ + goto __pyx_L6; + } + + /* "View.MemoryView":525 + * info.shape = self.view.shape + * else: + * 
info.shape = NULL # <<<<<<<<<<<<<< + * + * if flags & PyBUF_STRIDES: + */ + /*else*/ { + __pyx_v_info->shape = NULL; + } + __pyx_L6:; + + /* "View.MemoryView":527 + * info.shape = NULL + * + * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< + * info.strides = self.view.strides + * else: + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":528 + * + * if flags & PyBUF_STRIDES: + * info.strides = self.view.strides # <<<<<<<<<<<<<< + * else: + * info.strides = NULL + */ + __pyx_t_4 = __pyx_v_self->view.strides; + __pyx_v_info->strides = __pyx_t_4; + + /* "View.MemoryView":527 + * info.shape = NULL + * + * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< + * info.strides = self.view.strides + * else: + */ + goto __pyx_L7; + } + + /* "View.MemoryView":530 + * info.strides = self.view.strides + * else: + * info.strides = NULL # <<<<<<<<<<<<<< + * + * if flags & PyBUF_INDIRECT: + */ + /*else*/ { + __pyx_v_info->strides = NULL; + } + __pyx_L7:; + + /* "View.MemoryView":532 + * info.strides = NULL + * + * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< + * info.suboffsets = self.view.suboffsets + * else: + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":533 + * + * if flags & PyBUF_INDIRECT: + * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< + * else: + * info.suboffsets = NULL + */ + __pyx_t_4 = __pyx_v_self->view.suboffsets; + __pyx_v_info->suboffsets = __pyx_t_4; + + /* "View.MemoryView":532 + * info.strides = NULL + * + * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< + * info.suboffsets = self.view.suboffsets + * else: + */ + goto __pyx_L8; + } + + /* "View.MemoryView":535 + * info.suboffsets = self.view.suboffsets + * else: + * info.suboffsets = NULL # <<<<<<<<<<<<<< + * + * if flags & PyBUF_FORMAT: + */ + /*else*/ { + __pyx_v_info->suboffsets = NULL; + } + __pyx_L8:; + + /* "View.MemoryView":537 + * info.suboffsets = NULL + * + * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< + * info.format = self.view.format + * else: + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":538 + * + * if flags & PyBUF_FORMAT: + * info.format = self.view.format # <<<<<<<<<<<<<< + * else: + * info.format = NULL + */ + __pyx_t_5 = __pyx_v_self->view.format; + __pyx_v_info->format = __pyx_t_5; + + /* "View.MemoryView":537 + * info.suboffsets = NULL + * + * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< + * info.format = self.view.format + * else: + */ + goto __pyx_L9; + } + + /* "View.MemoryView":540 + * info.format = self.view.format + * else: + * info.format = NULL # <<<<<<<<<<<<<< + * + * info.buf = self.view.buf + */ + /*else*/ { + __pyx_v_info->format = NULL; + } + __pyx_L9:; + + /* "View.MemoryView":542 + * info.format = NULL + * + * info.buf = self.view.buf # <<<<<<<<<<<<<< + * info.ndim = self.view.ndim + * info.itemsize = self.view.itemsize + */ + __pyx_t_6 = __pyx_v_self->view.buf; + __pyx_v_info->buf = __pyx_t_6; + + /* "View.MemoryView":543 + * + * info.buf = self.view.buf + * info.ndim = self.view.ndim # <<<<<<<<<<<<<< + * info.itemsize = self.view.itemsize + * info.len = self.view.len + */ + __pyx_t_7 = __pyx_v_self->view.ndim; + __pyx_v_info->ndim = __pyx_t_7; + + /* "View.MemoryView":544 + * info.buf = self.view.buf + * info.ndim = self.view.ndim + * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< + * info.len = self.view.len + * info.readonly = self.view.readonly + */ + __pyx_t_8 = __pyx_v_self->view.itemsize; + __pyx_v_info->itemsize = __pyx_t_8; + + /* 
"View.MemoryView":545 + * info.ndim = self.view.ndim + * info.itemsize = self.view.itemsize + * info.len = self.view.len # <<<<<<<<<<<<<< + * info.readonly = self.view.readonly + * info.obj = self + */ + __pyx_t_8 = __pyx_v_self->view.len; + __pyx_v_info->len = __pyx_t_8; + + /* "View.MemoryView":546 + * info.itemsize = self.view.itemsize + * info.len = self.view.len + * info.readonly = self.view.readonly # <<<<<<<<<<<<<< + * info.obj = self + * + */ + __pyx_t_1 = __pyx_v_self->view.readonly; + __pyx_v_info->readonly = __pyx_t_1; + + /* "View.MemoryView":547 + * info.len = self.view.len + * info.readonly = self.view.readonly + * info.obj = self # <<<<<<<<<<<<<< + * + * __pyx_getbuffer = capsule( &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") + */ + __Pyx_INCREF(((PyObject *)__pyx_v_self)); + __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); + __pyx_v_info->obj = ((PyObject *)__pyx_v_self); + + /* "View.MemoryView":518 + * + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< + * if flags & PyBUF_WRITABLE and self.view.readonly: + * raise ValueError("Cannot create writable memory view from read-only memoryview") + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + if (__pyx_v_info->obj != NULL) { + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; + } + goto __pyx_L2; + __pyx_L0:; + if (__pyx_v_info->obj == Py_None) { + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; + } + __pyx_L2:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":553 + * + * @property + * def T(self): # <<<<<<<<<<<<<< + * cdef _memoryviewslice result = memoryview_copy(self) + * transpose_memslice(&result.from_slice) + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_t_2; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":554 + * @property + * def T(self): + * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< + * transpose_memslice(&result.from_slice) + * return result + */ + __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 554, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 554, __pyx_L1_error) + __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); + __pyx_t_1 
= 0; + + /* "View.MemoryView":555 + * def T(self): + * cdef _memoryviewslice result = memoryview_copy(self) + * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< + * return result + * + */ + __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 555, __pyx_L1_error) + + /* "View.MemoryView":556 + * cdef _memoryviewslice result = memoryview_copy(self) + * transpose_memslice(&result.from_slice) + * return result # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_result)); + __pyx_r = ((PyObject *)__pyx_v_result); + goto __pyx_L0; + + /* "View.MemoryView":553 + * + * @property + * def T(self): # <<<<<<<<<<<<<< + * cdef _memoryviewslice result = memoryview_copy(self) + * transpose_memslice(&result.from_slice) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_result); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":559 + * + * @property + * def base(self): # <<<<<<<<<<<<<< + * return self.obj + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":560 + * @property + * def base(self): + * return self.obj # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_self->obj); + __pyx_r = __pyx_v_self->obj; + goto __pyx_L0; + + /* "View.MemoryView":559 + * + * @property + * def base(self): # <<<<<<<<<<<<<< + * return self.obj + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":563 + * + * @property + * def shape(self): # <<<<<<<<<<<<<< + * return tuple([length for length in self.view.shape[:self.view.ndim]]) + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + Py_ssize_t __pyx_v_length; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + Py_ssize_t *__pyx_t_2; + Py_ssize_t *__pyx_t_3; + Py_ssize_t 
*__pyx_t_4; + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":564 + * @property + * def shape(self): + * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 564, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); + for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { + __pyx_t_2 = __pyx_t_4; + __pyx_v_length = (__pyx_t_2[0]); + __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 564, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + } + __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_r = __pyx_t_5; + __pyx_t_5 = 0; + goto __pyx_L0; + + /* "View.MemoryView":563 + * + * @property + * def shape(self): # <<<<<<<<<<<<<< + * return tuple([length for length in self.view.shape[:self.view.ndim]]) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":567 + * + * @property + * def strides(self): # <<<<<<<<<<<<<< + * if self.view.strides == NULL: + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + Py_ssize_t __pyx_v_stride; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + Py_ssize_t *__pyx_t_3; + Py_ssize_t *__pyx_t_4; + Py_ssize_t *__pyx_t_5; + PyObject *__pyx_t_6 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":568 + * @property + * def strides(self): + * if self.view.strides == NULL: # <<<<<<<<<<<<<< + * + * raise ValueError("Buffer view does not expose strides") + */ + __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":570 + * if self.view.strides == NULL: + * + * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< + * + * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) + */ + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 570, __pyx_L1_error) + 
__Pyx_GOTREF(__pyx_t_2); + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(1, 570, __pyx_L1_error) + + /* "View.MemoryView":568 + * @property + * def strides(self): + * if self.view.strides == NULL: # <<<<<<<<<<<<<< + * + * raise ValueError("Buffer view does not expose strides") + */ + } + + /* "View.MemoryView":572 + * raise ValueError("Buffer view does not expose strides") + * + * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 572, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); + for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { + __pyx_t_3 = __pyx_t_5; + __pyx_v_stride = (__pyx_t_3[0]); + __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 572, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + } + __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_r = __pyx_t_6; + __pyx_t_6 = 0; + goto __pyx_L0; + + /* "View.MemoryView":567 + * + * @property + * def strides(self): # <<<<<<<<<<<<<< + * if self.view.strides == NULL: + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":575 + * + * @property + * def suboffsets(self): # <<<<<<<<<<<<<< + * if self.view.suboffsets == NULL: + * return (-1,) * self.view.ndim + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + Py_ssize_t __pyx_v_suboffset; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + Py_ssize_t *__pyx_t_4; + Py_ssize_t *__pyx_t_5; + Py_ssize_t *__pyx_t_6; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":576 + * @property + * def suboffsets(self): + * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< + * return (-1,) * self.view.ndim + * + */ + __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":577 + * def suboffsets(self): + * if self.view.suboffsets == NULL: + * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< + * + * return tuple([suboffset for suboffset in 
self.view.suboffsets[:self.view.ndim]]) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__13, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 577, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "View.MemoryView":576 + * @property + * def suboffsets(self): + * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< + * return (-1,) * self.view.ndim + * + */ + } + + /* "View.MemoryView":579 + * return (-1,) * self.view.ndim + * + * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 579, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); + for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { + __pyx_t_4 = __pyx_t_6; + __pyx_v_suboffset = (__pyx_t_4[0]); + __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 579, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } + __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":575 + * + * @property + * def suboffsets(self): # <<<<<<<<<<<<<< + * if self.view.suboffsets == NULL: + * return (-1,) * self.view.ndim + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":582 + * + * @property + * def ndim(self): # <<<<<<<<<<<<<< + * return self.view.ndim + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":583 + * @property + * def ndim(self): + * return self.view.ndim # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 583, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; 
+ __pyx_t_1 = 0; + goto __pyx_L0; + + /* "View.MemoryView":582 + * + * @property + * def ndim(self): # <<<<<<<<<<<<<< + * return self.view.ndim + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":586 + * + * @property + * def itemsize(self): # <<<<<<<<<<<<<< + * return self.view.itemsize + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":587 + * @property + * def itemsize(self): + * return self.view.itemsize # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 587, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "View.MemoryView":586 + * + * @property + * def itemsize(self): # <<<<<<<<<<<<<< + * return self.view.itemsize + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":590 + * + * @property + * def nbytes(self): # <<<<<<<<<<<<<< + * return self.size * self.view.itemsize + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":591 + * @property + * def nbytes(self): + * return self.size * self.view.itemsize # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + 
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 591, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 591, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 591, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "View.MemoryView":590 + * + * @property + * def nbytes(self): # <<<<<<<<<<<<<< + * return self.size * self.view.itemsize + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":594 + * + * @property + * def size(self): # <<<<<<<<<<<<<< + * if self._size is None: + * result = 1 + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_v_result = NULL; + PyObject *__pyx_v_length = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + Py_ssize_t *__pyx_t_3; + Py_ssize_t *__pyx_t_4; + Py_ssize_t *__pyx_t_5; + PyObject *__pyx_t_6 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":595 + * @property + * def size(self): + * if self._size is None: # <<<<<<<<<<<<<< + * result = 1 + * + */ + __pyx_t_1 = (__pyx_v_self->_size == Py_None); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":596 + * def size(self): + * if self._size is None: + * result = 1 # <<<<<<<<<<<<<< + * + * for length in self.view.shape[:self.view.ndim]: + */ + __Pyx_INCREF(__pyx_int_1); + __pyx_v_result = __pyx_int_1; + + /* "View.MemoryView":598 + * result = 1 + * + * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< + * result *= length + * + */ + __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); + for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { + __pyx_t_3 = __pyx_t_5; + __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 598, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); + __pyx_t_6 = 0; + + /* "View.MemoryView":599 + * + * for length in self.view.shape[:self.view.ndim]: + * result *= length # <<<<<<<<<<<<<< + * + * self._size = result + */ + __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 599, 
__pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6);
+ __pyx_t_6 = 0;
+ }
+
+ /* "View.MemoryView":601
+ * result *= length
+ *
+ * self._size = result # <<<<<<<<<<<<<<
+ *
+ * return self._size
+ */
+ __Pyx_INCREF(__pyx_v_result);
+ __Pyx_GIVEREF(__pyx_v_result);
+ __Pyx_GOTREF(__pyx_v_self->_size);
+ __Pyx_DECREF(__pyx_v_self->_size);
+ __pyx_v_self->_size = __pyx_v_result;
+
+ /* "View.MemoryView":595
+ * @property
+ * def size(self):
+ * if self._size is None: # <<<<<<<<<<<<<<
+ * result = 1
+ *
+ */
+ }
+
+ /* "View.MemoryView":603
+ * self._size = result
+ *
+ * return self._size # <<<<<<<<<<<<<<
+ *
+ * def __len__(self):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_self->_size);
+ __pyx_r = __pyx_v_self->_size;
+ goto __pyx_L0;
+
+ /* "View.MemoryView":594
+ *
+ * @property
+ * def size(self): # <<<<<<<<<<<<<<
+ * if self._size is None:
+ * result = 1
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_result);
+ __Pyx_XDECREF(__pyx_v_length);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "View.MemoryView":605
+ * return self._size
+ *
+ * def __len__(self): # <<<<<<<<<<<<<<
+ * if self.view.ndim >= 1:
+ * return self.view.shape[0]
+ */
+
+/* Python wrapper */
+static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/
+static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) {
+ Py_ssize_t __pyx_r;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
+ __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) {
+ Py_ssize_t __pyx_r;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ __Pyx_RefNannySetupContext("__len__", 0);
+
+ /* "View.MemoryView":606
+ *
+ * def __len__(self):
+ * if self.view.ndim >= 1: # <<<<<<<<<<<<<<
+ * return self.view.shape[0]
+ *
+ */
+ __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0);
+ if (__pyx_t_1) {
+
+ /* "View.MemoryView":607
+ * def __len__(self):
+ * if self.view.ndim >= 1:
+ * return self.view.shape[0] # <<<<<<<<<<<<<<
+ *
+ * return 0
+ */
+ __pyx_r = (__pyx_v_self->view.shape[0]);
+ goto __pyx_L0;
+
+ /* "View.MemoryView":606
+ *
+ * def __len__(self):
+ * if self.view.ndim >= 1: # <<<<<<<<<<<<<<
+ * return self.view.shape[0]
+ *
+ */
+ }
+
+ /* "View.MemoryView":609
+ * return self.view.shape[0]
+ *
+ * return 0 # <<<<<<<<<<<<<<
+ *
+ * def __repr__(self):
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+
+ /* "View.MemoryView":605
+ * return self._size
+ *
+ * def __len__(self): # <<<<<<<<<<<<<<
+ * if self.view.ndim >= 1:
+ * return self.view.shape[0]
+ */
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "View.MemoryView":611
+ * return 0
+ *
+ * def __repr__(self): # <<<<<<<<<<<<<<
+ * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
+ * id(self))
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
+ __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__repr__", 0);
+
+ /* "View.MemoryView":612
+ *
+ * def __repr__(self):
+ * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
+ * id(self))
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "View.MemoryView":613
+ * def __repr__(self):
+ * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
+ * id(self)) # <<<<<<<<<<<<<<
+ *
+ * def __str__(self):
+ */
+ __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 613, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+
+ /* "View.MemoryView":612
+ *
+ * def __repr__(self):
+ * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
+ * id(self))
+ *
+ */
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 612, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2);
+ __pyx_t_1 = 0;
+ __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L0;
+
+ /* "View.MemoryView":611
+ * return 0
+ *
+ * def __repr__(self): # <<<<<<<<<<<<<<
+ * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
+ * id(self))
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "View.MemoryView":615
+ * id(self))
+ *
+ * def __str__(self): # <<<<<<<<<<<<<<
+ * return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
+ *
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__str__ (wrapper)", 0);
+ __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__str__", 0);
+
+ /* "View.MemoryView":616
+ *
+ * def __str__(self):
+ * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
+ __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* "View.MemoryView":615
+ * id(self))
+ *
+ * def __str__(self): # <<<<<<<<<<<<<<
+ * return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
+ *
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "View.MemoryView":619
+ *
+ *
+ * def is_c_contig(self): # <<<<<<<<<<<<<<
+ * cdef __Pyx_memviewslice *mslice
+ * cdef __Pyx_memviewslice tmp
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0);
+ __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
+ __Pyx_memviewslice *__pyx_v_mslice;
+ __Pyx_memviewslice __pyx_v_tmp;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ __Pyx_memviewslice *__pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("is_c_contig", 0);
+
+ /* "View.MemoryView":622
+ * cdef __Pyx_memviewslice *mslice
+ * cdef __Pyx_memviewslice tmp
+ * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
+ * return slice_is_contig(mslice[0], 'C', self.view.ndim)
+ *
+ */
+ __pyx_t_1 =
__pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 622, __pyx_L1_error) + __pyx_v_mslice = __pyx_t_1; + + /* "View.MemoryView":623 + * cdef __Pyx_memviewslice tmp + * mslice = get_slice_from_memview(self, &tmp) + * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< + * + * def is_f_contig(self): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 623, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":619 + * + * + * def is_c_contig(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice *mslice + * cdef __Pyx_memviewslice tmp + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":625 + * return slice_is_contig(mslice[0], 'C', self.view.ndim) + * + * def is_f_contig(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice *mslice + * cdef __Pyx_memviewslice tmp + */ + +/* Python wrapper */ +static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { + __Pyx_memviewslice *__pyx_v_mslice; + __Pyx_memviewslice __pyx_v_tmp; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_memviewslice *__pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("is_f_contig", 0); + + /* "View.MemoryView":628 + * cdef __Pyx_memviewslice *mslice + * cdef __Pyx_memviewslice tmp + * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< + * return slice_is_contig(mslice[0], 'F', self.view.ndim) + * + */ + __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 628, __pyx_L1_error) + __pyx_v_mslice = __pyx_t_1; + + /* "View.MemoryView":629 + * cdef __Pyx_memviewslice tmp + * mslice = get_slice_from_memview(self, &tmp) + * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< + * + * def copy(self): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 629, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":625 + * return slice_is_contig(mslice[0], 'C', self.view.ndim) + * + * def is_f_contig(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice *mslice + * cdef __Pyx_memviewslice tmp + */ + 
+ /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":631 + * return slice_is_contig(mslice[0], 'F', self.view.ndim) + * + * def copy(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice mslice + * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS + */ + +/* Python wrapper */ +static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("copy (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { + __Pyx_memviewslice __pyx_v_mslice; + int __pyx_v_flags; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_memviewslice __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("copy", 0); + + /* "View.MemoryView":633 + * def copy(self): + * cdef __Pyx_memviewslice mslice + * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< + * + * slice_copy(self, &mslice) + */ + __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); + + /* "View.MemoryView":635 + * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS + * + * slice_copy(self, &mslice) # <<<<<<<<<<<<<< + * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, + * self.view.itemsize, + */ + __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); + + /* "View.MemoryView":636 + * + * slice_copy(self, &mslice) + * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< + * self.view.itemsize, + * flags|PyBUF_C_CONTIGUOUS, + */ + __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 636, __pyx_L1_error) + __pyx_v_mslice = __pyx_t_1; + + /* "View.MemoryView":641 + * self.dtype_is_object) + * + * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< + * + * def copy_fortran(self): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 641, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":631 + * return slice_is_contig(mslice[0], 'F', self.view.ndim) + * + * def copy(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice mslice + * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":643 + * return 
memoryview_copy_from_slice(self, &mslice) + * + * def copy_fortran(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice src, dst + * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS + */ + +/* Python wrapper */ +static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { + __Pyx_memviewslice __pyx_v_src; + __Pyx_memviewslice __pyx_v_dst; + int __pyx_v_flags; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_memviewslice __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("copy_fortran", 0); + + /* "View.MemoryView":645 + * def copy_fortran(self): + * cdef __Pyx_memviewslice src, dst + * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< + * + * slice_copy(self, &src) + */ + __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); + + /* "View.MemoryView":647 + * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS + * + * slice_copy(self, &src) # <<<<<<<<<<<<<< + * dst = slice_copy_contig(&src, "fortran", self.view.ndim, + * self.view.itemsize, + */ + __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); + + /* "View.MemoryView":648 + * + * slice_copy(self, &src) + * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< + * self.view.itemsize, + * flags|PyBUF_F_CONTIGUOUS, + */ + __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 648, __pyx_L1_error) + __pyx_v_dst = __pyx_t_1; + + /* "View.MemoryView":653 + * self.dtype_is_object) + * + * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 653, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":643 + * return memoryview_copy_from_slice(self, &mslice) + * + * def copy_fortran(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice src, dst + * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject 
*unused); /*proto*/ +static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__reduce_cython__", 0); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 2, __pyx_L1_error) + + /* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ +static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setstate_cython__", 0); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) + 
__Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 4, __pyx_L1_error) + + /* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":657 + * + * @cname('__pyx_memoryview_new') + * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< + * cdef memoryview result = memoryview(o, flags, dtype_is_object) + * result.typeinfo = typeinfo + */ + +static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { + struct __pyx_memoryview_obj *__pyx_v_result = 0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); + + /* "View.MemoryView":658 + * @cname('__pyx_memoryview_new') + * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): + * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< + * result.typeinfo = typeinfo + * return result + */ + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 658, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 658, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(__pyx_v_o); + __Pyx_GIVEREF(__pyx_v_o); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_2); + PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); + __pyx_t_1 = 0; + __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); + __pyx_t_2 = 0; + + /* "View.MemoryView":659 + * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): + * cdef memoryview result = memoryview(o, flags, dtype_is_object) + * result.typeinfo = typeinfo # <<<<<<<<<<<<<< + * return result + * + */ + __pyx_v_result->typeinfo = __pyx_v_typeinfo; + + /* "View.MemoryView":660 + * cdef memoryview result = memoryview(o, flags, dtype_is_object) + * result.typeinfo = typeinfo + * return result # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_check') + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_result)); + __pyx_r = ((PyObject *)__pyx_v_result); + goto __pyx_L0; + + /* "View.MemoryView":657 + * + * @cname('__pyx_memoryview_new') + * cdef memoryview_cwrapper(object o, int flags, bint 
dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< + * cdef memoryview result = memoryview(o, flags, dtype_is_object) + * result.typeinfo = typeinfo + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_result); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":663 + * + * @cname('__pyx_memoryview_check') + * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< + * return isinstance(o, memoryview) + * + */ + +static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("memoryview_check", 0); + + /* "View.MemoryView":664 + * @cname('__pyx_memoryview_check') + * cdef inline bint memoryview_check(object o): + * return isinstance(o, memoryview) # <<<<<<<<<<<<<< + * + * cdef tuple _unellipsify(object index, int ndim): + */ + __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); + __pyx_r = __pyx_t_1; + goto __pyx_L0; + + /* "View.MemoryView":663 + * + * @cname('__pyx_memoryview_check') + * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< + * return isinstance(o, memoryview) + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":666 + * return isinstance(o, memoryview) + * + * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< + * """ + * Replace all ellipses with full slices and fill incomplete indices with + */ + +static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { + PyObject *__pyx_v_tup = NULL; + PyObject *__pyx_v_result = NULL; + int __pyx_v_have_slices; + int __pyx_v_seen_ellipsis; + CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; + PyObject *__pyx_v_item = NULL; + Py_ssize_t __pyx_v_nslices; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + Py_ssize_t __pyx_t_5; + PyObject *(*__pyx_t_6)(PyObject *); + PyObject *__pyx_t_7 = NULL; + Py_ssize_t __pyx_t_8; + int __pyx_t_9; + int __pyx_t_10; + PyObject *__pyx_t_11 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("_unellipsify", 0); + + /* "View.MemoryView":671 + * full slices. + * """ + * if not isinstance(index, tuple): # <<<<<<<<<<<<<< + * tup = (index,) + * else: + */ + __pyx_t_1 = PyTuple_Check(__pyx_v_index); + __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":672 + * """ + * if not isinstance(index, tuple): + * tup = (index,) # <<<<<<<<<<<<<< + * else: + * tup = index + */ + __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 672, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(__pyx_v_index); + __Pyx_GIVEREF(__pyx_v_index); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); + __pyx_v_tup = __pyx_t_3; + __pyx_t_3 = 0; + + /* "View.MemoryView":671 + * full slices. 
+ * """ + * if not isinstance(index, tuple): # <<<<<<<<<<<<<< + * tup = (index,) + * else: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":674 + * tup = (index,) + * else: + * tup = index # <<<<<<<<<<<<<< + * + * result = [] + */ + /*else*/ { + __Pyx_INCREF(__pyx_v_index); + __pyx_v_tup = __pyx_v_index; + } + __pyx_L3:; + + /* "View.MemoryView":676 + * tup = index + * + * result = [] # <<<<<<<<<<<<<< + * have_slices = False + * seen_ellipsis = False + */ + __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 676, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_v_result = ((PyObject*)__pyx_t_3); + __pyx_t_3 = 0; + + /* "View.MemoryView":677 + * + * result = [] + * have_slices = False # <<<<<<<<<<<<<< + * seen_ellipsis = False + * for idx, item in enumerate(tup): + */ + __pyx_v_have_slices = 0; + + /* "View.MemoryView":678 + * result = [] + * have_slices = False + * seen_ellipsis = False # <<<<<<<<<<<<<< + * for idx, item in enumerate(tup): + * if item is Ellipsis: + */ + __pyx_v_seen_ellipsis = 0; + + /* "View.MemoryView":679 + * have_slices = False + * seen_ellipsis = False + * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< + * if item is Ellipsis: + * if not seen_ellipsis: + */ + __Pyx_INCREF(__pyx_int_0); + __pyx_t_3 = __pyx_int_0; + if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { + __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; + __pyx_t_6 = NULL; + } else { + __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 679, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 679, __pyx_L1_error) + } + for (;;) { + if (likely(!__pyx_t_6)) { + if (likely(PyList_CheckExact(__pyx_t_4))) { + if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) + #else + __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + #endif + } else { + if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) + #else + __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + #endif + } + } else { + __pyx_t_7 = __pyx_t_6(__pyx_t_4); + if (unlikely(!__pyx_t_7)) { + PyObject* exc_type = PyErr_Occurred(); + if (exc_type) { + if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); + else __PYX_ERR(1, 679, __pyx_L1_error) + } + break; + } + __Pyx_GOTREF(__pyx_t_7); + } + __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); + __pyx_t_7 = 0; + __Pyx_INCREF(__pyx_t_3); + __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); + __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_DECREF(__pyx_t_3); + __pyx_t_3 = __pyx_t_7; + __pyx_t_7 = 0; + + /* "View.MemoryView":680 + * seen_ellipsis = False + * for idx, item in enumerate(tup): + * if item is Ellipsis: # <<<<<<<<<<<<<< + * if not seen_ellipsis: + * 
result.extend([slice(None)] * (ndim - len(tup) + 1)) + */ + __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); + __pyx_t_1 = (__pyx_t_2 != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":681 + * for idx, item in enumerate(tup): + * if item is Ellipsis: + * if not seen_ellipsis: # <<<<<<<<<<<<<< + * result.extend([slice(None)] * (ndim - len(tup) + 1)) + * seen_ellipsis = True + */ + __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":682 + * if item is Ellipsis: + * if not seen_ellipsis: + * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< + * seen_ellipsis = True + * else: + */ + __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 682, __pyx_L1_error) + __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 682, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + { Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { + __Pyx_INCREF(__pyx_slice__16); + __Pyx_GIVEREF(__pyx_slice__16); + PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__16); + } + } + __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 682, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + + /* "View.MemoryView":683 + * if not seen_ellipsis: + * result.extend([slice(None)] * (ndim - len(tup) + 1)) + * seen_ellipsis = True # <<<<<<<<<<<<<< + * else: + * result.append(slice(None)) + */ + __pyx_v_seen_ellipsis = 1; + + /* "View.MemoryView":681 + * for idx, item in enumerate(tup): + * if item is Ellipsis: + * if not seen_ellipsis: # <<<<<<<<<<<<<< + * result.extend([slice(None)] * (ndim - len(tup) + 1)) + * seen_ellipsis = True + */ + goto __pyx_L7; + } + + /* "View.MemoryView":685 + * seen_ellipsis = True + * else: + * result.append(slice(None)) # <<<<<<<<<<<<<< + * have_slices = True + * else: + */ + /*else*/ { + __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__16); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 685, __pyx_L1_error) + } + __pyx_L7:; + + /* "View.MemoryView":686 + * else: + * result.append(slice(None)) + * have_slices = True # <<<<<<<<<<<<<< + * else: + * if not isinstance(item, slice) and not PyIndex_Check(item): + */ + __pyx_v_have_slices = 1; + + /* "View.MemoryView":680 + * seen_ellipsis = False + * for idx, item in enumerate(tup): + * if item is Ellipsis: # <<<<<<<<<<<<<< + * if not seen_ellipsis: + * result.extend([slice(None)] * (ndim - len(tup) + 1)) + */ + goto __pyx_L6; + } + + /* "View.MemoryView":688 + * have_slices = True + * else: + * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< + * raise TypeError("Cannot index with type '%s'" % type(item)) + * + */ + /*else*/ { + __pyx_t_2 = PySlice_Check(__pyx_v_item); + __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); + if (__pyx_t_10) { + } else { + __pyx_t_1 = __pyx_t_10; + goto __pyx_L9_bool_binop_done; + } + __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); + __pyx_t_1 = __pyx_t_10; + __pyx_L9_bool_binop_done:; + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":689 + * else: + * if not isinstance(item, slice) and not PyIndex_Check(item): + * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< + * + * have_slices = have_slices or isinstance(item, slice) + */ + __pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject 
*)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 689, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 689, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_11); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_Raise(__pyx_t_11, 0, 0, 0); + __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; + __PYX_ERR(1, 689, __pyx_L1_error) + + /* "View.MemoryView":688 + * have_slices = True + * else: + * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< + * raise TypeError("Cannot index with type '%s'" % type(item)) + * + */ + } + + /* "View.MemoryView":691 + * raise TypeError("Cannot index with type '%s'" % type(item)) + * + * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< + * result.append(item) + * + */ + __pyx_t_10 = (__pyx_v_have_slices != 0); + if (!__pyx_t_10) { + } else { + __pyx_t_1 = __pyx_t_10; + goto __pyx_L11_bool_binop_done; + } + __pyx_t_10 = PySlice_Check(__pyx_v_item); + __pyx_t_2 = (__pyx_t_10 != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L11_bool_binop_done:; + __pyx_v_have_slices = __pyx_t_1; + + /* "View.MemoryView":692 + * + * have_slices = have_slices or isinstance(item, slice) + * result.append(item) # <<<<<<<<<<<<<< + * + * nslices = ndim - len(result) + */ + __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 692, __pyx_L1_error) + } + __pyx_L6:; + + /* "View.MemoryView":679 + * have_slices = False + * seen_ellipsis = False + * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< + * if item is Ellipsis: + * if not seen_ellipsis: + */ + } + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "View.MemoryView":694 + * result.append(item) + * + * nslices = ndim - len(result) # <<<<<<<<<<<<<< + * if nslices: + * result.extend([slice(None)] * nslices) + */ + __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 694, __pyx_L1_error) + __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); + + /* "View.MemoryView":695 + * + * nslices = ndim - len(result) + * if nslices: # <<<<<<<<<<<<<< + * result.extend([slice(None)] * nslices) + * + */ + __pyx_t_1 = (__pyx_v_nslices != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":696 + * nslices = ndim - len(result) + * if nslices: + * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< + * + * return have_slices or nslices, tuple(result) + */ + __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 696, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + { Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { + __Pyx_INCREF(__pyx_slice__16); + __Pyx_GIVEREF(__pyx_slice__16); + PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__16); + } + } + __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 696, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "View.MemoryView":695 + * + * nslices = ndim - len(result) + * if nslices: # <<<<<<<<<<<<<< + * result.extend([slice(None)] * nslices) + * + */ + } + + /* "View.MemoryView":698 + * result.extend([slice(None)] * nslices) + * + * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< + * + * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): + */ + __Pyx_XDECREF(__pyx_r); + if (!__pyx_v_have_slices) { + } else { + __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = __pyx_t_4; + __pyx_t_4 = 0; + goto __pyx_L14_bool_binop_done; + } + __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = __pyx_t_4; + __pyx_t_4 = 0; + __pyx_L14_bool_binop_done:; + __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 698, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_11); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3); + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4); + __pyx_t_3 = 0; + __pyx_t_4 = 0; + __pyx_r = ((PyObject*)__pyx_t_11); + __pyx_t_11 = 0; + goto __pyx_L0; + + /* "View.MemoryView":666 + * return isinstance(o, memoryview) + * + * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< + * """ + * Replace all ellipses with full slices and fill incomplete indices with + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_11); + __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_tup); + __Pyx_XDECREF(__pyx_v_result); + __Pyx_XDECREF(__pyx_v_idx); + __Pyx_XDECREF(__pyx_v_item); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":700 + * return have_slices or nslices, tuple(result) + * + * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: + */ + +static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { + Py_ssize_t __pyx_v_suboffset; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + Py_ssize_t *__pyx_t_1; + Py_ssize_t *__pyx_t_2; + Py_ssize_t *__pyx_t_3; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); + + /* "View.MemoryView":701 + * + * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): + * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< + * if suboffset >= 0: + * raise ValueError("Indirect dimensions not supported") + */ + 
__pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); + for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { + __pyx_t_1 = __pyx_t_3; + __pyx_v_suboffset = (__pyx_t_1[0]); + + /* "View.MemoryView":702 + * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: # <<<<<<<<<<<<<< + * raise ValueError("Indirect dimensions not supported") + * + */ + __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); + if (unlikely(__pyx_t_4)) { + + /* "View.MemoryView":703 + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: + * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 703, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_Raise(__pyx_t_5, 0, 0, 0); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __PYX_ERR(1, 703, __pyx_L1_error) + + /* "View.MemoryView":702 + * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: # <<<<<<<<<<<<<< + * raise ValueError("Indirect dimensions not supported") + * + */ + } + } + + /* "View.MemoryView":700 + * return have_slices or nslices, tuple(result) + * + * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":710 + * + * @cname('__pyx_memview_slice') + * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< + * cdef int new_ndim = 0, suboffset_dim = -1, dim + * cdef bint negative_step + */ + +static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { + int __pyx_v_new_ndim; + int __pyx_v_suboffset_dim; + int __pyx_v_dim; + __Pyx_memviewslice __pyx_v_src; + __Pyx_memviewslice __pyx_v_dst; + __Pyx_memviewslice *__pyx_v_p_src; + struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; + __Pyx_memviewslice *__pyx_v_p_dst; + int *__pyx_v_p_suboffset_dim; + Py_ssize_t __pyx_v_start; + Py_ssize_t __pyx_v_stop; + Py_ssize_t __pyx_v_step; + int __pyx_v_have_start; + int __pyx_v_have_stop; + int __pyx_v_have_step; + PyObject *__pyx_v_index = NULL; + struct __pyx_memoryview_obj *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + struct __pyx_memoryview_obj *__pyx_t_4; + char *__pyx_t_5; + int __pyx_t_6; + Py_ssize_t __pyx_t_7; + PyObject *(*__pyx_t_8)(PyObject *); + PyObject *__pyx_t_9 = NULL; + Py_ssize_t __pyx_t_10; + int __pyx_t_11; + Py_ssize_t __pyx_t_12; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("memview_slice", 0); + + /* "View.MemoryView":711 + * @cname('__pyx_memview_slice') + * cdef memoryview memview_slice(memoryview memview, object indices): + * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< + * cdef bint negative_step + * cdef __Pyx_memviewslice src, dst + */ + __pyx_v_new_ndim = 0; + __pyx_v_suboffset_dim = -1; + + /* 
"View.MemoryView":718 + * + * + * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< + * + * cdef _memoryviewslice memviewsliceobj + */ + (void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)))); + + /* "View.MemoryView":722 + * cdef _memoryviewslice memviewsliceobj + * + * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< + * + * if isinstance(memview, _memoryviewslice): + */ + #ifndef CYTHON_WITHOUT_ASSERTIONS + if (unlikely(!Py_OptimizeFlag)) { + if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { + PyErr_SetNone(PyExc_AssertionError); + __PYX_ERR(1, 722, __pyx_L1_error) + } + } + #endif + + /* "View.MemoryView":724 + * assert memview.view.ndim > 0 + * + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * memviewsliceobj = memview + * p_src = &memviewsliceobj.from_slice + */ + __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":725 + * + * if isinstance(memview, _memoryviewslice): + * memviewsliceobj = memview # <<<<<<<<<<<<<< + * p_src = &memviewsliceobj.from_slice + * else: + */ + if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 725, __pyx_L1_error) + __pyx_t_3 = ((PyObject *)__pyx_v_memview); + __Pyx_INCREF(__pyx_t_3); + __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); + __pyx_t_3 = 0; + + /* "View.MemoryView":726 + * if isinstance(memview, _memoryviewslice): + * memviewsliceobj = memview + * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< + * else: + * slice_copy(memview, &src) + */ + __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); + + /* "View.MemoryView":724 + * assert memview.view.ndim > 0 + * + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * memviewsliceobj = memview + * p_src = &memviewsliceobj.from_slice + */ + goto __pyx_L3; + } + + /* "View.MemoryView":728 + * p_src = &memviewsliceobj.from_slice + * else: + * slice_copy(memview, &src) # <<<<<<<<<<<<<< + * p_src = &src + * + */ + /*else*/ { + __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); + + /* "View.MemoryView":729 + * else: + * slice_copy(memview, &src) + * p_src = &src # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_p_src = (&__pyx_v_src); + } + __pyx_L3:; + + /* "View.MemoryView":735 + * + * + * dst.memview = p_src.memview # <<<<<<<<<<<<<< + * dst.data = p_src.data + * + */ + __pyx_t_4 = __pyx_v_p_src->memview; + __pyx_v_dst.memview = __pyx_t_4; + + /* "View.MemoryView":736 + * + * dst.memview = p_src.memview + * dst.data = p_src.data # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_5 = __pyx_v_p_src->data; + __pyx_v_dst.data = __pyx_t_5; + + /* "View.MemoryView":741 + * + * + * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< + * cdef int *p_suboffset_dim = &suboffset_dim + * cdef Py_ssize_t start, stop, step + */ + __pyx_v_p_dst = (&__pyx_v_dst); + + /* "View.MemoryView":742 + * + * cdef __Pyx_memviewslice *p_dst = &dst + * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< + * cdef Py_ssize_t start, stop, step + * cdef bint have_start, have_stop, have_step + */ + __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); + + /* "View.MemoryView":746 + * cdef bint have_start, have_stop, have_step + * + * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< + * if PyIndex_Check(index): + * slice_memviewslice( + */ + __pyx_t_6 = 0; + if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { + 
__pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; + __pyx_t_8 = NULL; + } else { + __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 746, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 746, __pyx_L1_error) + } + for (;;) { + if (likely(!__pyx_t_8)) { + if (likely(PyList_CheckExact(__pyx_t_3))) { + if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) + #else + __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + #endif + } else { + if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) + #else + __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + #endif + } + } else { + __pyx_t_9 = __pyx_t_8(__pyx_t_3); + if (unlikely(!__pyx_t_9)) { + PyObject* exc_type = PyErr_Occurred(); + if (exc_type) { + if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); + else __PYX_ERR(1, 746, __pyx_L1_error) + } + break; + } + __Pyx_GOTREF(__pyx_t_9); + } + __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); + __pyx_t_9 = 0; + __pyx_v_dim = __pyx_t_6; + __pyx_t_6 = (__pyx_t_6 + 1); + + /* "View.MemoryView":747 + * + * for dim, index in enumerate(indices): + * if PyIndex_Check(index): # <<<<<<<<<<<<<< + * slice_memviewslice( + * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], + */ + __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":751 + * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], + * dim, new_ndim, p_suboffset_dim, + * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< + * 0, 0, 0, # have_{start,stop,step} + * False) + */ + __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 751, __pyx_L1_error) + + /* "View.MemoryView":748 + * for dim, index in enumerate(indices): + * if PyIndex_Check(index): + * slice_memviewslice( # <<<<<<<<<<<<<< + * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], + * dim, new_ndim, p_suboffset_dim, + */ + __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 748, __pyx_L1_error) + + /* "View.MemoryView":747 + * + * for dim, index in enumerate(indices): + * if PyIndex_Check(index): # <<<<<<<<<<<<<< + * slice_memviewslice( + * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], + */ + goto __pyx_L6; + } + + /* "View.MemoryView":754 + * 0, 0, 0, # have_{start,stop,step} + * False) + * elif index is None: # <<<<<<<<<<<<<< + * p_dst.shape[new_ndim] = 1 + * p_dst.strides[new_ndim] = 0 + */ + __pyx_t_2 = (__pyx_v_index == Py_None); + __pyx_t_1 = 
(__pyx_t_2 != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":755 + * False) + * elif index is None: + * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< + * p_dst.strides[new_ndim] = 0 + * p_dst.suboffsets[new_ndim] = -1 + */ + (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; + + /* "View.MemoryView":756 + * elif index is None: + * p_dst.shape[new_ndim] = 1 + * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< + * p_dst.suboffsets[new_ndim] = -1 + * new_ndim += 1 + */ + (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; + + /* "View.MemoryView":757 + * p_dst.shape[new_ndim] = 1 + * p_dst.strides[new_ndim] = 0 + * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< + * new_ndim += 1 + * else: + */ + (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; + + /* "View.MemoryView":758 + * p_dst.strides[new_ndim] = 0 + * p_dst.suboffsets[new_ndim] = -1 + * new_ndim += 1 # <<<<<<<<<<<<<< + * else: + * start = index.start or 0 + */ + __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); + + /* "View.MemoryView":754 + * 0, 0, 0, # have_{start,stop,step} + * False) + * elif index is None: # <<<<<<<<<<<<<< + * p_dst.shape[new_ndim] = 1 + * p_dst.strides[new_ndim] = 0 + */ + goto __pyx_L6; + } + + /* "View.MemoryView":760 + * new_ndim += 1 + * else: + * start = index.start or 0 # <<<<<<<<<<<<<< + * stop = index.stop or 0 + * step = index.step or 0 + */ + /*else*/ { + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 760, __pyx_L1_error) + if (!__pyx_t_1) { + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + } else { + __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 760, __pyx_L1_error) + __pyx_t_10 = __pyx_t_12; + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + goto __pyx_L7_bool_binop_done; + } + __pyx_t_10 = 0; + __pyx_L7_bool_binop_done:; + __pyx_v_start = __pyx_t_10; + + /* "View.MemoryView":761 + * else: + * start = index.start or 0 + * stop = index.stop or 0 # <<<<<<<<<<<<<< + * step = index.step or 0 + * + */ + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 761, __pyx_L1_error) + if (!__pyx_t_1) { + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + } else { + __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 761, __pyx_L1_error) + __pyx_t_10 = __pyx_t_12; + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + goto __pyx_L9_bool_binop_done; + } + __pyx_t_10 = 0; + __pyx_L9_bool_binop_done:; + __pyx_v_stop = __pyx_t_10; + + /* "View.MemoryView":762 + * start = index.start or 0 + * stop = index.stop or 0 + * step = index.step or 0 # <<<<<<<<<<<<<< + * + * have_start = index.start is not None + */ + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 762, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 762, __pyx_L1_error) + if (!__pyx_t_1) { + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + } else { + __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 762, __pyx_L1_error) + __pyx_t_10 = __pyx_t_12; + 
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + goto __pyx_L11_bool_binop_done; + } + __pyx_t_10 = 0; + __pyx_L11_bool_binop_done:; + __pyx_v_step = __pyx_t_10; + + /* "View.MemoryView":764 + * step = index.step or 0 + * + * have_start = index.start is not None # <<<<<<<<<<<<<< + * have_stop = index.stop is not None + * have_step = index.step is not None + */ + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 764, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_1 = (__pyx_t_9 != Py_None); + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + __pyx_v_have_start = __pyx_t_1; + + /* "View.MemoryView":765 + * + * have_start = index.start is not None + * have_stop = index.stop is not None # <<<<<<<<<<<<<< + * have_step = index.step is not None + * + */ + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 765, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_1 = (__pyx_t_9 != Py_None); + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + __pyx_v_have_stop = __pyx_t_1; + + /* "View.MemoryView":766 + * have_start = index.start is not None + * have_stop = index.stop is not None + * have_step = index.step is not None # <<<<<<<<<<<<<< + * + * slice_memviewslice( + */ + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 766, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_1 = (__pyx_t_9 != Py_None); + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + __pyx_v_have_step = __pyx_t_1; + + /* "View.MemoryView":768 + * have_step = index.step is not None + * + * slice_memviewslice( # <<<<<<<<<<<<<< + * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], + * dim, new_ndim, p_suboffset_dim, + */ + __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 768, __pyx_L1_error) + + /* "View.MemoryView":774 + * have_start, have_stop, have_step, + * True) + * new_ndim += 1 # <<<<<<<<<<<<<< + * + * if isinstance(memview, _memoryviewslice): + */ + __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); + } + __pyx_L6:; + + /* "View.MemoryView":746 + * cdef bint have_start, have_stop, have_step + * + * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< + * if PyIndex_Check(index): + * slice_memviewslice( + */ + } + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "View.MemoryView":776 + * new_ndim += 1 + * + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * return memoryview_fromslice(dst, new_ndim, + * memviewsliceobj.to_object_func, + */ + __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":777 + * + * if isinstance(memview, _memoryviewslice): + * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< + * memviewsliceobj.to_object_func, + * memviewsliceobj.to_dtype_func, + */ + __Pyx_XDECREF(((PyObject *)__pyx_r)); + + /* "View.MemoryView":778 + * if isinstance(memview, _memoryviewslice): + * return memoryview_fromslice(dst, new_ndim, + * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< + * memviewsliceobj.to_dtype_func, + * memview.dtype_is_object) + */ + if (unlikely(!__pyx_v_memviewsliceobj)) 
{ __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 778, __pyx_L1_error) } + + /* "View.MemoryView":779 + * return memoryview_fromslice(dst, new_ndim, + * memviewsliceobj.to_object_func, + * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< + * memview.dtype_is_object) + * else: + */ + if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 779, __pyx_L1_error) } + + /* "View.MemoryView":777 + * + * if isinstance(memview, _memoryviewslice): + * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< + * memviewsliceobj.to_object_func, + * memviewsliceobj.to_dtype_func, + */ + __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 777, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 777, __pyx_L1_error) + __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "View.MemoryView":776 + * new_ndim += 1 + * + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * return memoryview_fromslice(dst, new_ndim, + * memviewsliceobj.to_object_func, + */ + } + + /* "View.MemoryView":782 + * memview.dtype_is_object) + * else: + * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< + * memview.dtype_is_object) + * + */ + /*else*/ { + __Pyx_XDECREF(((PyObject *)__pyx_r)); + + /* "View.MemoryView":783 + * else: + * return memoryview_fromslice(dst, new_ndim, NULL, NULL, + * memview.dtype_is_object) # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 782, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + + /* "View.MemoryView":782 + * memview.dtype_is_object) + * else: + * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< + * memview.dtype_is_object) + * + */ + if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 782, __pyx_L1_error) + __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); + __pyx_t_3 = 0; + goto __pyx_L0; + } + + /* "View.MemoryView":710 + * + * @cname('__pyx_memview_slice') + * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< + * cdef int new_ndim = 0, suboffset_dim = -1, dim + * cdef bint negative_step + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_9); + __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); + __Pyx_XDECREF(__pyx_v_index); + __Pyx_XGIVEREF((PyObject *)__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":807 + * + * @cname('__pyx_memoryview_slice_memviewslice') + * cdef int slice_memviewslice( # <<<<<<<<<<<<<< + * __Pyx_memviewslice *dst, + * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, + */ + +static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int 
__pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { + Py_ssize_t __pyx_v_new_shape; + int __pyx_v_negative_step; + int __pyx_r; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + + /* "View.MemoryView":827 + * cdef bint negative_step + * + * if not is_slice: # <<<<<<<<<<<<<< + * + * if start < 0: + */ + __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":829 + * if not is_slice: + * + * if start < 0: # <<<<<<<<<<<<<< + * start += shape + * if not 0 <= start < shape: + */ + __pyx_t_1 = ((__pyx_v_start < 0) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":830 + * + * if start < 0: + * start += shape # <<<<<<<<<<<<<< + * if not 0 <= start < shape: + * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) + */ + __pyx_v_start = (__pyx_v_start + __pyx_v_shape); + + /* "View.MemoryView":829 + * if not is_slice: + * + * if start < 0: # <<<<<<<<<<<<<< + * start += shape + * if not 0 <= start < shape: + */ + } + + /* "View.MemoryView":831 + * if start < 0: + * start += shape + * if not 0 <= start < shape: # <<<<<<<<<<<<<< + * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) + * else: + */ + __pyx_t_1 = (0 <= __pyx_v_start); + if (__pyx_t_1) { + __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); + } + __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":832 + * start += shape + * if not 0 <= start < shape: + * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< + * else: + * + */ + __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 832, __pyx_L1_error) + + /* "View.MemoryView":831 + * if start < 0: + * start += shape + * if not 0 <= start < shape: # <<<<<<<<<<<<<< + * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) + * else: + */ + } + + /* "View.MemoryView":827 + * cdef bint negative_step + * + * if not is_slice: # <<<<<<<<<<<<<< + * + * if start < 0: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":835 + * else: + * + * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< + * + * if have_step and step == 0: + */ + /*else*/ { + __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); + if (__pyx_t_1) { + } else { + __pyx_t_2 = __pyx_t_1; + goto __pyx_L6_bool_binop_done; + } + __pyx_t_1 = ((__pyx_v_step < 0) != 0); + __pyx_t_2 = __pyx_t_1; + __pyx_L6_bool_binop_done:; + __pyx_v_negative_step = __pyx_t_2; + + /* "View.MemoryView":837 + * negative_step = have_step != 0 and step < 0 + * + * if have_step and step == 0: # <<<<<<<<<<<<<< + * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) + * + */ + __pyx_t_1 = (__pyx_v_have_step != 0); + if (__pyx_t_1) { + } else { + __pyx_t_2 = __pyx_t_1; + goto __pyx_L9_bool_binop_done; + } + __pyx_t_1 = ((__pyx_v_step == 0) != 0); + __pyx_t_2 = __pyx_t_1; + __pyx_L9_bool_binop_done:; + if (__pyx_t_2) { + + /* "View.MemoryView":838 + * + * if have_step and step == 0: + * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 838, __pyx_L1_error) + + /* "View.MemoryView":837 + * negative_step = have_step != 0 and step < 0 + * + * if have_step and step == 0: # <<<<<<<<<<<<<< + * _err_dim(ValueError, 
"Step may not be zero (axis %d)", dim) + * + */ + } + + /* "View.MemoryView":841 + * + * + * if have_start: # <<<<<<<<<<<<<< + * if start < 0: + * start += shape + */ + __pyx_t_2 = (__pyx_v_have_start != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":842 + * + * if have_start: + * if start < 0: # <<<<<<<<<<<<<< + * start += shape + * if start < 0: + */ + __pyx_t_2 = ((__pyx_v_start < 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":843 + * if have_start: + * if start < 0: + * start += shape # <<<<<<<<<<<<<< + * if start < 0: + * start = 0 + */ + __pyx_v_start = (__pyx_v_start + __pyx_v_shape); + + /* "View.MemoryView":844 + * if start < 0: + * start += shape + * if start < 0: # <<<<<<<<<<<<<< + * start = 0 + * elif start >= shape: + */ + __pyx_t_2 = ((__pyx_v_start < 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":845 + * start += shape + * if start < 0: + * start = 0 # <<<<<<<<<<<<<< + * elif start >= shape: + * if negative_step: + */ + __pyx_v_start = 0; + + /* "View.MemoryView":844 + * if start < 0: + * start += shape + * if start < 0: # <<<<<<<<<<<<<< + * start = 0 + * elif start >= shape: + */ + } + + /* "View.MemoryView":842 + * + * if have_start: + * if start < 0: # <<<<<<<<<<<<<< + * start += shape + * if start < 0: + */ + goto __pyx_L12; + } + + /* "View.MemoryView":846 + * if start < 0: + * start = 0 + * elif start >= shape: # <<<<<<<<<<<<<< + * if negative_step: + * start = shape - 1 + */ + __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":847 + * start = 0 + * elif start >= shape: + * if negative_step: # <<<<<<<<<<<<<< + * start = shape - 1 + * else: + */ + __pyx_t_2 = (__pyx_v_negative_step != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":848 + * elif start >= shape: + * if negative_step: + * start = shape - 1 # <<<<<<<<<<<<<< + * else: + * start = shape + */ + __pyx_v_start = (__pyx_v_shape - 1); + + /* "View.MemoryView":847 + * start = 0 + * elif start >= shape: + * if negative_step: # <<<<<<<<<<<<<< + * start = shape - 1 + * else: + */ + goto __pyx_L14; + } + + /* "View.MemoryView":850 + * start = shape - 1 + * else: + * start = shape # <<<<<<<<<<<<<< + * else: + * if negative_step: + */ + /*else*/ { + __pyx_v_start = __pyx_v_shape; + } + __pyx_L14:; + + /* "View.MemoryView":846 + * if start < 0: + * start = 0 + * elif start >= shape: # <<<<<<<<<<<<<< + * if negative_step: + * start = shape - 1 + */ + } + __pyx_L12:; + + /* "View.MemoryView":841 + * + * + * if have_start: # <<<<<<<<<<<<<< + * if start < 0: + * start += shape + */ + goto __pyx_L11; + } + + /* "View.MemoryView":852 + * start = shape + * else: + * if negative_step: # <<<<<<<<<<<<<< + * start = shape - 1 + * else: + */ + /*else*/ { + __pyx_t_2 = (__pyx_v_negative_step != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":853 + * else: + * if negative_step: + * start = shape - 1 # <<<<<<<<<<<<<< + * else: + * start = 0 + */ + __pyx_v_start = (__pyx_v_shape - 1); + + /* "View.MemoryView":852 + * start = shape + * else: + * if negative_step: # <<<<<<<<<<<<<< + * start = shape - 1 + * else: + */ + goto __pyx_L15; + } + + /* "View.MemoryView":855 + * start = shape - 1 + * else: + * start = 0 # <<<<<<<<<<<<<< + * + * if have_stop: + */ + /*else*/ { + __pyx_v_start = 0; + } + __pyx_L15:; + } + __pyx_L11:; + + /* "View.MemoryView":857 + * start = 0 + * + * if have_stop: # <<<<<<<<<<<<<< + * if stop < 0: + * stop += shape + */ + __pyx_t_2 = (__pyx_v_have_stop != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":858 + * + * if have_stop: + * if stop < 0: # 
<<<<<<<<<<<<<< + * stop += shape + * if stop < 0: + */ + __pyx_t_2 = ((__pyx_v_stop < 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":859 + * if have_stop: + * if stop < 0: + * stop += shape # <<<<<<<<<<<<<< + * if stop < 0: + * stop = 0 + */ + __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); + + /* "View.MemoryView":860 + * if stop < 0: + * stop += shape + * if stop < 0: # <<<<<<<<<<<<<< + * stop = 0 + * elif stop > shape: + */ + __pyx_t_2 = ((__pyx_v_stop < 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":861 + * stop += shape + * if stop < 0: + * stop = 0 # <<<<<<<<<<<<<< + * elif stop > shape: + * stop = shape + */ + __pyx_v_stop = 0; + + /* "View.MemoryView":860 + * if stop < 0: + * stop += shape + * if stop < 0: # <<<<<<<<<<<<<< + * stop = 0 + * elif stop > shape: + */ + } + + /* "View.MemoryView":858 + * + * if have_stop: + * if stop < 0: # <<<<<<<<<<<<<< + * stop += shape + * if stop < 0: + */ + goto __pyx_L17; + } + + /* "View.MemoryView":862 + * if stop < 0: + * stop = 0 + * elif stop > shape: # <<<<<<<<<<<<<< + * stop = shape + * else: + */ + __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":863 + * stop = 0 + * elif stop > shape: + * stop = shape # <<<<<<<<<<<<<< + * else: + * if negative_step: + */ + __pyx_v_stop = __pyx_v_shape; + + /* "View.MemoryView":862 + * if stop < 0: + * stop = 0 + * elif stop > shape: # <<<<<<<<<<<<<< + * stop = shape + * else: + */ + } + __pyx_L17:; + + /* "View.MemoryView":857 + * start = 0 + * + * if have_stop: # <<<<<<<<<<<<<< + * if stop < 0: + * stop += shape + */ + goto __pyx_L16; + } + + /* "View.MemoryView":865 + * stop = shape + * else: + * if negative_step: # <<<<<<<<<<<<<< + * stop = -1 + * else: + */ + /*else*/ { + __pyx_t_2 = (__pyx_v_negative_step != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":866 + * else: + * if negative_step: + * stop = -1 # <<<<<<<<<<<<<< + * else: + * stop = shape + */ + __pyx_v_stop = -1L; + + /* "View.MemoryView":865 + * stop = shape + * else: + * if negative_step: # <<<<<<<<<<<<<< + * stop = -1 + * else: + */ + goto __pyx_L19; + } + + /* "View.MemoryView":868 + * stop = -1 + * else: + * stop = shape # <<<<<<<<<<<<<< + * + * if not have_step: + */ + /*else*/ { + __pyx_v_stop = __pyx_v_shape; + } + __pyx_L19:; + } + __pyx_L16:; + + /* "View.MemoryView":870 + * stop = shape + * + * if not have_step: # <<<<<<<<<<<<<< + * step = 1 + * + */ + __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":871 + * + * if not have_step: + * step = 1 # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_step = 1; + + /* "View.MemoryView":870 + * stop = shape + * + * if not have_step: # <<<<<<<<<<<<<< + * step = 1 + * + */ + } + + /* "View.MemoryView":875 + * + * with cython.cdivision(True): + * new_shape = (stop - start) // step # <<<<<<<<<<<<<< + * + * if (stop - start) - step * new_shape: + */ + __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); + + /* "View.MemoryView":877 + * new_shape = (stop - start) // step + * + * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< + * new_shape += 1 + * + */ + __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":878 + * + * if (stop - start) - step * new_shape: + * new_shape += 1 # <<<<<<<<<<<<<< + * + * if new_shape < 0: + */ + __pyx_v_new_shape = (__pyx_v_new_shape + 1); + + /* "View.MemoryView":877 + * new_shape = (stop - start) // step + * + * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< + * 
new_shape += 1
+ *
+ */
+ }
+
+ /* "View.MemoryView":880
+ * new_shape += 1
+ *
+ * if new_shape < 0: # <<<<<<<<<<<<<<
+ * new_shape = 0
+ *
+ */
+ __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0);
+ if (__pyx_t_2) {
+
+ /* "View.MemoryView":881
+ *
+ * if new_shape < 0:
+ * new_shape = 0 # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_v_new_shape = 0;
+
+ /* "View.MemoryView":880
+ * new_shape += 1
+ *
+ * if new_shape < 0: # <<<<<<<<<<<<<<
+ * new_shape = 0
+ *
+ */
+ }
+
+ /* "View.MemoryView":884
+ *
+ *
+ * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<<
+ * dst.shape[new_ndim] = new_shape
+ * dst.suboffsets[new_ndim] = suboffset
+ */
+ (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step);
+
+ /* "View.MemoryView":885
+ *
+ * dst.strides[new_ndim] = stride * step
+ * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<<
+ * dst.suboffsets[new_ndim] = suboffset
+ *
+ */
+ (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape;
+
+ /* "View.MemoryView":886
+ * dst.strides[new_ndim] = stride * step
+ * dst.shape[new_ndim] = new_shape
+ * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset;
+ }
+ __pyx_L3:;
+
+ /* "View.MemoryView":889
+ *
+ *
+ * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
+ * dst.data += start * stride
+ * else:
+ */
+ __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0);
+ if (__pyx_t_2) {
+
+ /* "View.MemoryView":890
+ *
+ * if suboffset_dim[0] < 0:
+ * dst.data += start * stride # <<<<<<<<<<<<<<
+ * else:
+ * dst.suboffsets[suboffset_dim[0]] += start * stride
+ */
+ __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride));
+
+ /* "View.MemoryView":889
+ *
+ *
+ * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
+ * dst.data += start * stride
+ * else:
+ */
+ goto __pyx_L23;
+ }
+
+ /* "View.MemoryView":892
+ * dst.data += start * stride
+ * else:
+ * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<<
+ *
+ * if suboffset >= 0:
+ */
+ /*else*/ {
+ __pyx_t_3 = (__pyx_v_suboffset_dim[0]);
+ (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride));
+ }
+ __pyx_L23:;
+
+ /* "View.MemoryView":894
+ * dst.suboffsets[suboffset_dim[0]] += start * stride
+ *
+ * if suboffset >= 0: # <<<<<<<<<<<<<<
+ * if not is_slice:
+ * if new_ndim == 0:
+ */
+ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
+ if (__pyx_t_2) {
+
+ /* "View.MemoryView":895
+ *
+ * if suboffset >= 0:
+ * if not is_slice: # <<<<<<<<<<<<<<
+ * if new_ndim == 0:
+ * dst.data = (<char **> dst.data)[0] + suboffset
+ */
+ __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0);
+ if (__pyx_t_2) {
+
+ /* "View.MemoryView":896
+ * if suboffset >= 0:
+ * if not is_slice:
+ * if new_ndim == 0: # <<<<<<<<<<<<<<
+ * dst.data = (<char **> dst.data)[0] + suboffset
+ * else:
+ */
+ __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0);
+ if (__pyx_t_2) {
+
+ /* "View.MemoryView":897
+ * if not is_slice:
+ * if new_ndim == 0:
+ * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<<
+ * else:
+ * _err_dim(IndexError, "All dimensions preceding dimension %d "
+ */
+ __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset);
+
+ /* "View.MemoryView":896
+ * if suboffset >= 0:
+ * if not is_slice:
+ * if new_ndim == 0: # <<<<<<<<<<<<<<
+ * dst.data = (<char **> dst.data)[0] + suboffset
+ * else:
+ */
+ goto __pyx_L26;
+ }
+
+ /* "View.MemoryView":899
+ * dst.data = (<char **> dst.data)[0] + suboffset
+ * else:
+ * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<<
+ * "must be indexed and not sliced", dim)
+ * else:
+ */
+ /*else*/ {
+
+ /* "View.MemoryView":900
+ * else:
+ * _err_dim(IndexError, "All dimensions preceding dimension %d "
+ * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<<
+ * else:
+ * suboffset_dim[0] = new_ndim
+ */
+ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 899, __pyx_L1_error)
+ }
+ __pyx_L26:;
+
+ /* "View.MemoryView":895
+ *
+ * if suboffset >= 0:
+ * if not is_slice: # <<<<<<<<<<<<<<
+ * if new_ndim == 0:
+ * dst.data = (<char **> dst.data)[0] + suboffset
+ */
+ goto __pyx_L25;
+ }
+
+ /* "View.MemoryView":902
+ * "must be indexed and not sliced", dim)
+ * else:
+ * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<<
+ *
+ * return 0
+ */
+ /*else*/ {
+ (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim;
+ }
+ __pyx_L25:;
+
+ /* "View.MemoryView":894
+ * dst.suboffsets[suboffset_dim[0]] += start * stride
+ *
+ * if suboffset >= 0: # <<<<<<<<<<<<<<
+ * if not is_slice:
+ * if new_ndim == 0:
+ */
+ }
+
+ /* "View.MemoryView":904
+ * suboffset_dim[0] = new_ndim
+ *
+ * return 0 # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+
+ /* "View.MemoryView":807
+ *
+ * @cname('__pyx_memoryview_slice_memviewslice')
+ * cdef int slice_memviewslice( # <<<<<<<<<<<<<<
+ * __Pyx_memviewslice *dst,
+ * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ {
+ #ifdef WITH_THREAD
+ PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
+ #endif
+ __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ #ifdef WITH_THREAD
+ __Pyx_PyGILState_Release(__pyx_gilstate_save);
+ #endif
+ }
+ __pyx_r = -1;
+ __pyx_L0:;
+ return __pyx_r;
+}
+
+/* "View.MemoryView":910
+ *
+ * @cname('__pyx_pybuffer_index')
+ * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
+ * Py_ssize_t dim) except NULL:
+ * cdef Py_ssize_t shape, stride, suboffset = -1
+ */
+
+static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) {
+ Py_ssize_t __pyx_v_shape;
+ Py_ssize_t __pyx_v_stride;
+ Py_ssize_t __pyx_v_suboffset;
+ Py_ssize_t __pyx_v_itemsize;
+ char *__pyx_v_resultp;
+ char *__pyx_r;
+ __Pyx_RefNannyDeclarations
+ Py_ssize_t __pyx_t_1;
+ int __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("pybuffer_index", 0);
+
+ /* "View.MemoryView":912
+ * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,
+ * Py_ssize_t dim) except NULL:
+ * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<<
+ * cdef Py_ssize_t itemsize = view.itemsize
+ * cdef char *resultp
+ */
+ __pyx_v_suboffset = -1L;
+
+ /* "View.MemoryView":913
+ * Py_ssize_t dim) except NULL:
+ * cdef Py_ssize_t shape, stride, suboffset = -1
+ * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<<
+ * cdef char *resultp
+ *
+ */
+ __pyx_t_1 = __pyx_v_view->itemsize;
+ __pyx_v_itemsize = __pyx_t_1;
+
+ /* "View.MemoryView":916
+ * cdef char *resultp
+ *
+ * if view.ndim == 0: # <<<<<<<<<<<<<<
+ * shape = view.len / itemsize
+ * stride = itemsize
+ */
+ __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0);
+ if (__pyx_t_2) {
+
+ /* "View.MemoryView":917
+ *
+ * if view.ndim == 
0: + * shape = view.len / itemsize # <<<<<<<<<<<<<< + * stride = itemsize + * else: + */ + if (unlikely(__pyx_v_itemsize == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); + __PYX_ERR(1, 917, __pyx_L1_error) + } + else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { + PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); + __PYX_ERR(1, 917, __pyx_L1_error) + } + __pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize); + + /* "View.MemoryView":918 + * if view.ndim == 0: + * shape = view.len / itemsize + * stride = itemsize # <<<<<<<<<<<<<< + * else: + * shape = view.shape[dim] + */ + __pyx_v_stride = __pyx_v_itemsize; + + /* "View.MemoryView":916 + * cdef char *resultp + * + * if view.ndim == 0: # <<<<<<<<<<<<<< + * shape = view.len / itemsize + * stride = itemsize + */ + goto __pyx_L3; + } + + /* "View.MemoryView":920 + * stride = itemsize + * else: + * shape = view.shape[dim] # <<<<<<<<<<<<<< + * stride = view.strides[dim] + * if view.suboffsets != NULL: + */ + /*else*/ { + __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); + + /* "View.MemoryView":921 + * else: + * shape = view.shape[dim] + * stride = view.strides[dim] # <<<<<<<<<<<<<< + * if view.suboffsets != NULL: + * suboffset = view.suboffsets[dim] + */ + __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); + + /* "View.MemoryView":922 + * shape = view.shape[dim] + * stride = view.strides[dim] + * if view.suboffsets != NULL: # <<<<<<<<<<<<<< + * suboffset = view.suboffsets[dim] + * + */ + __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":923 + * stride = view.strides[dim] + * if view.suboffsets != NULL: + * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< + * + * if index < 0: + */ + __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); + + /* "View.MemoryView":922 + * shape = view.shape[dim] + * stride = view.strides[dim] + * if view.suboffsets != NULL: # <<<<<<<<<<<<<< + * suboffset = view.suboffsets[dim] + * + */ + } + } + __pyx_L3:; + + /* "View.MemoryView":925 + * suboffset = view.suboffsets[dim] + * + * if index < 0: # <<<<<<<<<<<<<< + * index += view.shape[dim] + * if index < 0: + */ + __pyx_t_2 = ((__pyx_v_index < 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":926 + * + * if index < 0: + * index += view.shape[dim] # <<<<<<<<<<<<<< + * if index < 0: + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) + */ + __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); + + /* "View.MemoryView":927 + * if index < 0: + * index += view.shape[dim] + * if index < 0: # <<<<<<<<<<<<<< + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) + * + */ + __pyx_t_2 = ((__pyx_v_index < 0) != 0); + if (unlikely(__pyx_t_2)) { + + /* "View.MemoryView":928 + * index += view.shape[dim] + * if index < 0: + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< + * + * if index >= shape: + */ + __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 928, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = 
__Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __PYX_ERR(1, 928, __pyx_L1_error)
+
+ /* "View.MemoryView":927
+ * if index < 0:
+ * index += view.shape[dim]
+ * if index < 0: # <<<<<<<<<<<<<<
+ * raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
+ *
+ */
+ }
+
+ /* "View.MemoryView":925
+ * suboffset = view.suboffsets[dim]
+ *
+ * if index < 0: # <<<<<<<<<<<<<<
+ * index += view.shape[dim]
+ * if index < 0:
+ */
+ }
+
+ /* "View.MemoryView":930
+ * raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
+ *
+ * if index >= shape: # <<<<<<<<<<<<<<
+ * raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
+ *
+ */
+ __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0);
+ if (unlikely(__pyx_t_2)) {
+
+ /* "View.MemoryView":931
+ *
+ * if index >= shape:
+ * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
+ *
+ * resultp = bufp + index * stride
+ */
+ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 931, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __PYX_ERR(1, 931, __pyx_L1_error)
+
+ /* "View.MemoryView":930
+ * raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
+ *
+ * if index >= shape: # <<<<<<<<<<<<<<
+ * raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
+ *
+ */
+ }
+
+ /* "View.MemoryView":933
+ * raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
+ *
+ * resultp = bufp + index * stride # <<<<<<<<<<<<<<
+ * if suboffset >= 0:
+ * resultp = (<char **> resultp)[0] + suboffset
+ */
+ __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride));
+
+ /* "View.MemoryView":934
+ *
+ * resultp = bufp + index * stride
+ * if suboffset >= 0: # <<<<<<<<<<<<<<
+ * resultp = (<char **> resultp)[0] + suboffset
+ *
+ */
+ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
+ if (__pyx_t_2) {
+
+ /* "View.MemoryView":935
+ * resultp = bufp + index * stride
+ * if suboffset >= 0:
+ * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<<
+ *
+ * return resultp
+ */
+ __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset);
+
+ /* "View.MemoryView":934
+ *
+ * resultp = bufp + index * stride
+ * if suboffset >= 0: # <<<<<<<<<<<<<<
+ * resultp = (<char **> resultp)[0] + suboffset
+ *
+ */
+ }
+
+ /* "View.MemoryView":937
+ * resultp = (<char **> resultp)[0] + suboffset
+ *
+ * return resultp # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_r = __pyx_v_resultp;
+ goto __pyx_L0;
+
+ /* "View.MemoryView":910
+ *
+ * @cname('__pyx_pybuffer_index')
+ * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
+ * Py_ssize_t dim) except NULL:
+ * cdef Py_ssize_t shape, stride, suboffset = -1
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ 
__Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":943 + * + * @cname('__pyx_memslice_transpose') + * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< + * cdef int ndim = memslice.memview.view.ndim + * + */ + +static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { + int __pyx_v_ndim; + Py_ssize_t *__pyx_v_shape; + Py_ssize_t *__pyx_v_strides; + int __pyx_v_i; + int __pyx_v_j; + int __pyx_r; + int __pyx_t_1; + Py_ssize_t *__pyx_t_2; + long __pyx_t_3; + long __pyx_t_4; + Py_ssize_t __pyx_t_5; + Py_ssize_t __pyx_t_6; + int __pyx_t_7; + int __pyx_t_8; + int __pyx_t_9; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + + /* "View.MemoryView":944 + * @cname('__pyx_memslice_transpose') + * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: + * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< + * + * cdef Py_ssize_t *shape = memslice.shape + */ + __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; + __pyx_v_ndim = __pyx_t_1; + + /* "View.MemoryView":946 + * cdef int ndim = memslice.memview.view.ndim + * + * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< + * cdef Py_ssize_t *strides = memslice.strides + * + */ + __pyx_t_2 = __pyx_v_memslice->shape; + __pyx_v_shape = __pyx_t_2; + + /* "View.MemoryView":947 + * + * cdef Py_ssize_t *shape = memslice.shape + * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_2 = __pyx_v_memslice->strides; + __pyx_v_strides = __pyx_t_2; + + /* "View.MemoryView":951 + * + * cdef int i, j + * for i in range(ndim / 2): # <<<<<<<<<<<<<< + * j = ndim - 1 - i + * strides[i], strides[j] = strides[j], strides[i] + */ + __pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2); + __pyx_t_4 = __pyx_t_3; + for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) { + __pyx_v_i = __pyx_t_1; + + /* "View.MemoryView":952 + * cdef int i, j + * for i in range(ndim / 2): + * j = ndim - 1 - i # <<<<<<<<<<<<<< + * strides[i], strides[j] = strides[j], strides[i] + * shape[i], shape[j] = shape[j], shape[i] + */ + __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); + + /* "View.MemoryView":953 + * for i in range(ndim / 2): + * j = ndim - 1 - i + * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< + * shape[i], shape[j] = shape[j], shape[i] + * + */ + __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]); + __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]); + (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5; + (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6; + + /* "View.MemoryView":954 + * j = ndim - 1 - i + * strides[i], strides[j] = strides[j], strides[i] + * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< + * + * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: + */ + __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]); + __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]); + (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6; + (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5; + + /* "View.MemoryView":956 + * shape[i], shape[j] = shape[j], shape[i] + * + * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< + * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") + * + */ + __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); + if (!__pyx_t_8) { + } else { + __pyx_t_7 = __pyx_t_8; + goto __pyx_L6_bool_binop_done; + } + __pyx_t_8 = 
(((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); + __pyx_t_7 = __pyx_t_8; + __pyx_L6_bool_binop_done:; + if (__pyx_t_7) { + + /* "View.MemoryView":957 + * + * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: + * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< + * + * return 1 + */ + __pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 957, __pyx_L1_error) + + /* "View.MemoryView":956 + * shape[i], shape[j] = shape[j], shape[i] + * + * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< + * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") + * + */ + } + } + + /* "View.MemoryView":959 + * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") + * + * return 1 # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = 1; + goto __pyx_L0; + + /* "View.MemoryView":943 + * + * @cname('__pyx_memslice_transpose') + * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< + * cdef int ndim = memslice.memview.view.ndim + * + */ + + /* function exit code */ + __pyx_L1_error:; + { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + } + __pyx_r = 0; + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":976 + * cdef int (*to_dtype_func)(char *, object) except 0 + * + * def __dealloc__(self): # <<<<<<<<<<<<<< + * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) + * + */ + +/* Python wrapper */ +static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ +static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); + __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__dealloc__", 0); + + /* "View.MemoryView":977 + * + * def __dealloc__(self): + * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< + * + * cdef convert_item_to_object(self, char *itemp): + */ + __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); + + /* "View.MemoryView":976 + * cdef int (*to_dtype_func)(char *, object) except 0 + * + * def __dealloc__(self): # <<<<<<<<<<<<<< + * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) + * + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "View.MemoryView":979 + * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) + * + * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< + * if self.to_object_func != NULL: + * return self.to_object_func(itemp) + */ + +static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + 
__Pyx_RefNannySetupContext("convert_item_to_object", 0); + + /* "View.MemoryView":980 + * + * cdef convert_item_to_object(self, char *itemp): + * if self.to_object_func != NULL: # <<<<<<<<<<<<<< + * return self.to_object_func(itemp) + * else: + */ + __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":981 + * cdef convert_item_to_object(self, char *itemp): + * if self.to_object_func != NULL: + * return self.to_object_func(itemp) # <<<<<<<<<<<<<< + * else: + * return memoryview.convert_item_to_object(self, itemp) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 981, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":980 + * + * cdef convert_item_to_object(self, char *itemp): + * if self.to_object_func != NULL: # <<<<<<<<<<<<<< + * return self.to_object_func(itemp) + * else: + */ + } + + /* "View.MemoryView":983 + * return self.to_object_func(itemp) + * else: + * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< + * + * cdef assign_item_from_object(self, char *itemp, object value): + */ + /*else*/ { + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 983, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + } + + /* "View.MemoryView":979 + * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) + * + * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< + * if self.to_object_func != NULL: + * return self.to_object_func(itemp) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":985 + * return memoryview.convert_item_to_object(self, itemp) + * + * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< + * if self.to_dtype_func != NULL: + * self.to_dtype_func(itemp, value) + */ + +static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("assign_item_from_object", 0); + + /* "View.MemoryView":986 + * + * cdef assign_item_from_object(self, char *itemp, object value): + * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< + * self.to_dtype_func(itemp, value) + * else: + */ + __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":987 + * cdef assign_item_from_object(self, char *itemp, object value): + * if self.to_dtype_func != NULL: + * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< + * else: + * memoryview.assign_item_from_object(self, itemp, value) + */ + __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 987, __pyx_L1_error) + + /* "View.MemoryView":986 + * + * cdef assign_item_from_object(self, char *itemp, object value): + * if 
self.to_dtype_func != NULL: # <<<<<<<<<<<<<< + * self.to_dtype_func(itemp, value) + * else: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":989 + * self.to_dtype_func(itemp, value) + * else: + * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< + * + * @property + */ + /*else*/ { + __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 989, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __pyx_L3:; + + /* "View.MemoryView":985 + * return memoryview.convert_item_to_object(self, itemp) + * + * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< + * if self.to_dtype_func != NULL: + * self.to_dtype_func(itemp, value) + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":992 + * + * @property + * def base(self): # <<<<<<<<<<<<<< + * return self.from_object + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":993 + * @property + * def base(self): + * return self.from_object # <<<<<<<<<<<<<< + * + * __pyx_getbuffer = capsule( &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_self->from_object); + __pyx_r = __pyx_v_self->from_object; + goto __pyx_L0; + + /* "View.MemoryView":992 + * + * @property + * def base(self): # <<<<<<<<<<<<<< + * return self.from_object + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject 
*__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__reduce_cython__", 0); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 2, __pyx_L1_error) + + /* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ +static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setstate_cython__", 0); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 4, __pyx_L1_error) + + /* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + + /* function exit code 
*/ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":999 + * + * @cname('__pyx_memoryview_fromslice') + * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< + * int ndim, + * object (*to_object_func)(char *), + */ + +static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { + struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; + Py_ssize_t __pyx_v_suboffset; + PyObject *__pyx_v_length = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + __Pyx_TypeInfo *__pyx_t_4; + Py_buffer __pyx_t_5; + Py_ssize_t *__pyx_t_6; + Py_ssize_t *__pyx_t_7; + Py_ssize_t *__pyx_t_8; + Py_ssize_t __pyx_t_9; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("memoryview_fromslice", 0); + + /* "View.MemoryView":1007 + * cdef _memoryviewslice result + * + * if memviewslice.memview == Py_None: # <<<<<<<<<<<<<< + * return None + * + */ + __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1008 + * + * if memviewslice.memview == Py_None: + * return None # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + + /* "View.MemoryView":1007 + * cdef _memoryviewslice result + * + * if memviewslice.memview == Py_None: # <<<<<<<<<<<<<< + * return None + * + */ + } + + /* "View.MemoryView":1013 + * + * + * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< + * + * result.from_slice = memviewslice + */ + __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1013, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(Py_None); + PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); + __Pyx_INCREF(__pyx_int_0); + __Pyx_GIVEREF(__pyx_int_0); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); + __Pyx_GIVEREF(__pyx_t_2); + PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); + __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); + __pyx_t_2 = 0; + + /* "View.MemoryView":1015 + * result = _memoryviewslice(None, 0, dtype_is_object) + * + * result.from_slice = memviewslice # <<<<<<<<<<<<<< + * __PYX_INC_MEMVIEW(&memviewslice, 1) + * + */ + __pyx_v_result->from_slice = __pyx_v_memviewslice; + + /* "View.MemoryView":1016 + * + * result.from_slice = memviewslice + * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< + * + * result.from_object = ( memviewslice.memview).base + */ + __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); + + /* "View.MemoryView":1018 + * __PYX_INC_MEMVIEW(&memviewslice, 1) + * + * result.from_object = ( memviewslice.memview).base # <<<<<<<<<<<<<< + * result.typeinfo = 
memviewslice.memview.typeinfo + * + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1018, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GIVEREF(__pyx_t_2); + __Pyx_GOTREF(__pyx_v_result->from_object); + __Pyx_DECREF(__pyx_v_result->from_object); + __pyx_v_result->from_object = __pyx_t_2; + __pyx_t_2 = 0; + + /* "View.MemoryView":1019 + * + * result.from_object = ( memviewslice.memview).base + * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< + * + * result.view = memviewslice.memview.view + */ + __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; + __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; + + /* "View.MemoryView":1021 + * result.typeinfo = memviewslice.memview.typeinfo + * + * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< + * result.view.buf = memviewslice.data + * result.view.ndim = ndim + */ + __pyx_t_5 = __pyx_v_memviewslice.memview->view; + __pyx_v_result->__pyx_base.view = __pyx_t_5; + + /* "View.MemoryView":1022 + * + * result.view = memviewslice.memview.view + * result.view.buf = memviewslice.data # <<<<<<<<<<<<<< + * result.view.ndim = ndim + * (<__pyx_buffer *> &result.view).obj = Py_None + */ + __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); + + /* "View.MemoryView":1023 + * result.view = memviewslice.memview.view + * result.view.buf = memviewslice.data + * result.view.ndim = ndim # <<<<<<<<<<<<<< + * (<__pyx_buffer *> &result.view).obj = Py_None + * Py_INCREF(Py_None) + */ + __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; + + /* "View.MemoryView":1024 + * result.view.buf = memviewslice.data + * result.view.ndim = ndim + * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< + * Py_INCREF(Py_None) + * + */ + ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; + + /* "View.MemoryView":1025 + * result.view.ndim = ndim + * (<__pyx_buffer *> &result.view).obj = Py_None + * Py_INCREF(Py_None) # <<<<<<<<<<<<<< + * + * if (memviewslice.memview).flags & PyBUF_WRITABLE: + */ + Py_INCREF(Py_None); + + /* "View.MemoryView":1027 + * Py_INCREF(Py_None) + * + * if (memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< + * result.flags = PyBUF_RECORDS + * else: + */ + __pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1028 + * + * if (memviewslice.memview).flags & PyBUF_WRITABLE: + * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< + * else: + * result.flags = PyBUF_RECORDS_RO + */ + __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; + + /* "View.MemoryView":1027 + * Py_INCREF(Py_None) + * + * if (memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< + * result.flags = PyBUF_RECORDS + * else: + */ + goto __pyx_L4; + } + + /* "View.MemoryView":1030 + * result.flags = PyBUF_RECORDS + * else: + * result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<< + * + * result.view.shape = result.from_slice.shape + */ + /*else*/ { + __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO; + } + __pyx_L4:; + + /* "View.MemoryView":1032 + * result.flags = PyBUF_RECORDS_RO + * + * result.view.shape = result.from_slice.shape # <<<<<<<<<<<<<< + * result.view.strides = result.from_slice.strides + * + */ + __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); + + /* "View.MemoryView":1033 + * + * result.view.shape = result.from_slice.shape + * result.view.strides = 
result.from_slice.strides # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); + + /* "View.MemoryView":1036 + * + * + * result.view.suboffsets = NULL # <<<<<<<<<<<<<< + * for suboffset in result.from_slice.suboffsets[:ndim]: + * if suboffset >= 0: + */ + __pyx_v_result->__pyx_base.view.suboffsets = NULL; + + /* "View.MemoryView":1037 + * + * result.view.suboffsets = NULL + * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< + * if suboffset >= 0: + * result.view.suboffsets = result.from_slice.suboffsets + */ + __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); + for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { + __pyx_t_6 = __pyx_t_8; + __pyx_v_suboffset = (__pyx_t_6[0]); + + /* "View.MemoryView":1038 + * result.view.suboffsets = NULL + * for suboffset in result.from_slice.suboffsets[:ndim]: + * if suboffset >= 0: # <<<<<<<<<<<<<< + * result.view.suboffsets = result.from_slice.suboffsets + * break + */ + __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1039 + * for suboffset in result.from_slice.suboffsets[:ndim]: + * if suboffset >= 0: + * result.view.suboffsets = result.from_slice.suboffsets # <<<<<<<<<<<<<< + * break + * + */ + __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); + + /* "View.MemoryView":1040 + * if suboffset >= 0: + * result.view.suboffsets = result.from_slice.suboffsets + * break # <<<<<<<<<<<<<< + * + * result.view.len = result.view.itemsize + */ + goto __pyx_L6_break; + + /* "View.MemoryView":1038 + * result.view.suboffsets = NULL + * for suboffset in result.from_slice.suboffsets[:ndim]: + * if suboffset >= 0: # <<<<<<<<<<<<<< + * result.view.suboffsets = result.from_slice.suboffsets + * break + */ + } + } + __pyx_L6_break:; + + /* "View.MemoryView":1042 + * break + * + * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< + * for length in result.view.shape[:ndim]: + * result.view.len *= length + */ + __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; + __pyx_v_result->__pyx_base.view.len = __pyx_t_9; + + /* "View.MemoryView":1043 + * + * result.view.len = result.view.itemsize + * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< + * result.view.len *= length + * + */ + __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); + for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { + __pyx_t_6 = __pyx_t_8; + __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1043, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); + __pyx_t_2 = 0; + + /* "View.MemoryView":1044 + * result.view.len = result.view.itemsize + * for length in result.view.shape[:ndim]: + * result.view.len *= length # <<<<<<<<<<<<<< + * + * result.to_object_func = to_object_func + */ + __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1044, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1044, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1044, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + 
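+ /* Editor's note (illustrative, not generated by Cython): this loop rebuilds
+  * Py_buffer.len as itemsize * product(shape[:ndim]), i.e. the total number
+  * of bytes the logical slice covers. For example, a 3x4 slice of 8-byte
+  * items yields len == 8 * 3 * 4 == 96. */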
__pyx_v_result->__pyx_base.view.len = __pyx_t_9; + } + + /* "View.MemoryView":1046 + * result.view.len *= length + * + * result.to_object_func = to_object_func # <<<<<<<<<<<<<< + * result.to_dtype_func = to_dtype_func + * + */ + __pyx_v_result->to_object_func = __pyx_v_to_object_func; + + /* "View.MemoryView":1047 + * + * result.to_object_func = to_object_func + * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< + * + * return result + */ + __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; + + /* "View.MemoryView":1049 + * result.to_dtype_func = to_dtype_func + * + * return result # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_get_slice_from_memoryview') + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_result)); + __pyx_r = ((PyObject *)__pyx_v_result); + goto __pyx_L0; + + /* "View.MemoryView":999 + * + * @cname('__pyx_memoryview_fromslice') + * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< + * int ndim, + * object (*to_object_func)(char *), + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_result); + __Pyx_XDECREF(__pyx_v_length); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":1052 + * + * @cname('__pyx_memoryview_get_slice_from_memoryview') + * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *mslice) except NULL: + * cdef _memoryviewslice obj + */ + +static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { + struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; + __Pyx_memviewslice *__pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("get_slice_from_memview", 0); + + /* "View.MemoryView":1055 + * __Pyx_memviewslice *mslice) except NULL: + * cdef _memoryviewslice obj + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * obj = memview + * return &obj.from_slice + */ + __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1056 + * cdef _memoryviewslice obj + * if isinstance(memview, _memoryviewslice): + * obj = memview # <<<<<<<<<<<<<< + * return &obj.from_slice + * else: + */ + if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1056, __pyx_L1_error) + __pyx_t_3 = ((PyObject *)__pyx_v_memview); + __Pyx_INCREF(__pyx_t_3); + __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); + __pyx_t_3 = 0; + + /* "View.MemoryView":1057 + * if isinstance(memview, _memoryviewslice): + * obj = memview + * return &obj.from_slice # <<<<<<<<<<<<<< + * else: + * slice_copy(memview, mslice) + */ + __pyx_r = (&__pyx_v_obj->from_slice); + goto __pyx_L0; + + /* "View.MemoryView":1055 + * __Pyx_memviewslice *mslice) except NULL: + * cdef _memoryviewslice obj + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * obj = memview + * return &obj.from_slice + */ + } + + /* "View.MemoryView":1059 + * return 
&obj.from_slice + * else: + * slice_copy(memview, mslice) # <<<<<<<<<<<<<< + * return mslice + * + */ + /*else*/ { + __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); + + /* "View.MemoryView":1060 + * else: + * slice_copy(memview, mslice) + * return mslice # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_slice_copy') + */ + __pyx_r = __pyx_v_mslice; + goto __pyx_L0; + } + + /* "View.MemoryView":1052 + * + * @cname('__pyx_memoryview_get_slice_from_memoryview') + * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *mslice) except NULL: + * cdef _memoryviewslice obj + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_obj); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":1063 + * + * @cname('__pyx_memoryview_slice_copy') + * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< + * cdef int dim + * cdef (Py_ssize_t*) shape, strides, suboffsets + */ + +static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { + int __pyx_v_dim; + Py_ssize_t *__pyx_v_shape; + Py_ssize_t *__pyx_v_strides; + Py_ssize_t *__pyx_v_suboffsets; + __Pyx_RefNannyDeclarations + Py_ssize_t *__pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + Py_ssize_t __pyx_t_5; + __Pyx_RefNannySetupContext("slice_copy", 0); + + /* "View.MemoryView":1067 + * cdef (Py_ssize_t*) shape, strides, suboffsets + * + * shape = memview.view.shape # <<<<<<<<<<<<<< + * strides = memview.view.strides + * suboffsets = memview.view.suboffsets + */ + __pyx_t_1 = __pyx_v_memview->view.shape; + __pyx_v_shape = __pyx_t_1; + + /* "View.MemoryView":1068 + * + * shape = memview.view.shape + * strides = memview.view.strides # <<<<<<<<<<<<<< + * suboffsets = memview.view.suboffsets + * + */ + __pyx_t_1 = __pyx_v_memview->view.strides; + __pyx_v_strides = __pyx_t_1; + + /* "View.MemoryView":1069 + * shape = memview.view.shape + * strides = memview.view.strides + * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< + * + * dst.memview = <__pyx_memoryview *> memview + */ + __pyx_t_1 = __pyx_v_memview->view.suboffsets; + __pyx_v_suboffsets = __pyx_t_1; + + /* "View.MemoryView":1071 + * suboffsets = memview.view.suboffsets + * + * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< + * dst.data = memview.view.buf + * + */ + __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); + + /* "View.MemoryView":1072 + * + * dst.memview = <__pyx_memoryview *> memview + * dst.data = memview.view.buf # <<<<<<<<<<<<<< + * + * for dim in range(memview.view.ndim): + */ + __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); + + /* "View.MemoryView":1074 + * dst.data = memview.view.buf + * + * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< + * dst.shape[dim] = shape[dim] + * dst.strides[dim] = strides[dim] + */ + __pyx_t_2 = __pyx_v_memview->view.ndim; + __pyx_t_3 = __pyx_t_2; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_v_dim = __pyx_t_4; + + /* "View.MemoryView":1075 + * + * for dim in range(memview.view.ndim): + * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< + * dst.strides[dim] = strides[dim] + * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 + */ + (__pyx_v_dst->shape[__pyx_v_dim]) = 
(__pyx_v_shape[__pyx_v_dim]); + + /* "View.MemoryView":1076 + * for dim in range(memview.view.ndim): + * dst.shape[dim] = shape[dim] + * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< + * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 + * + */ + (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); + + /* "View.MemoryView":1077 + * dst.shape[dim] = shape[dim] + * dst.strides[dim] = strides[dim] + * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_copy_object') + */ + if ((__pyx_v_suboffsets != 0)) { + __pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]); + } else { + __pyx_t_5 = -1L; + } + (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5; + } + + /* "View.MemoryView":1063 + * + * @cname('__pyx_memoryview_slice_copy') + * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< + * cdef int dim + * cdef (Py_ssize_t*) shape, strides, suboffsets + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "View.MemoryView":1080 + * + * @cname('__pyx_memoryview_copy_object') + * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< + * "Create a new memoryview object" + * cdef __Pyx_memviewslice memviewslice + */ + +static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { + __Pyx_memviewslice __pyx_v_memviewslice; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("memoryview_copy", 0); + + /* "View.MemoryView":1083 + * "Create a new memoryview object" + * cdef __Pyx_memviewslice memviewslice + * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< + * return memoryview_copy_from_slice(memview, &memviewslice) + * + */ + __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); + + /* "View.MemoryView":1084 + * cdef __Pyx_memviewslice memviewslice + * slice_copy(memview, &memviewslice) + * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_copy_object_from_slice') + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1084, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "View.MemoryView":1080 + * + * @cname('__pyx_memoryview_copy_object') + * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< + * "Create a new memoryview object" + * cdef __Pyx_memviewslice memviewslice + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":1087 + * + * @cname('__pyx_memoryview_copy_object_from_slice') + * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< + * """ + * Create a new memoryview object from a given memoryview object and slice. 
+ */ + +static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { + PyObject *(*__pyx_v_to_object_func)(char *); + int (*__pyx_v_to_dtype_func)(char *, PyObject *); + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *(*__pyx_t_3)(char *); + int (*__pyx_t_4)(char *, PyObject *); + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); + + /* "View.MemoryView":1094 + * cdef int (*to_dtype_func)(char *, object) except 0 + * + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * to_object_func = (<_memoryviewslice> memview).to_object_func + * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func + */ + __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1095 + * + * if isinstance(memview, _memoryviewslice): + * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< + * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func + * else: + */ + __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; + __pyx_v_to_object_func = __pyx_t_3; + + /* "View.MemoryView":1096 + * if isinstance(memview, _memoryviewslice): + * to_object_func = (<_memoryviewslice> memview).to_object_func + * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< + * else: + * to_object_func = NULL + */ + __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; + __pyx_v_to_dtype_func = __pyx_t_4; + + /* "View.MemoryView":1094 + * cdef int (*to_dtype_func)(char *, object) except 0 + * + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * to_object_func = (<_memoryviewslice> memview).to_object_func + * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func + */ + goto __pyx_L3; + } + + /* "View.MemoryView":1098 + * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func + * else: + * to_object_func = NULL # <<<<<<<<<<<<<< + * to_dtype_func = NULL + * + */ + /*else*/ { + __pyx_v_to_object_func = NULL; + + /* "View.MemoryView":1099 + * else: + * to_object_func = NULL + * to_dtype_func = NULL # <<<<<<<<<<<<<< + * + * return memoryview_fromslice(memviewslice[0], memview.view.ndim, + */ + __pyx_v_to_dtype_func = NULL; + } + __pyx_L3:; + + /* "View.MemoryView":1101 + * to_dtype_func = NULL + * + * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< + * to_object_func, to_dtype_func, + * memview.dtype_is_object) + */ + __Pyx_XDECREF(__pyx_r); + + /* "View.MemoryView":1103 + * return memoryview_fromslice(memviewslice[0], memview.view.ndim, + * to_object_func, to_dtype_func, + * memview.dtype_is_object) # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1101, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_r = __pyx_t_5; + __pyx_t_5 = 0; + goto __pyx_L0; + + /* "View.MemoryView":1087 + * + * @cname('__pyx_memoryview_copy_object_from_slice') + * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< + * """ + * Create a new memoryview object from a given memoryview 
object and slice. + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":1109 + * + * + * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< + * if arg < 0: + * return -arg + */ + +static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { + Py_ssize_t __pyx_r; + int __pyx_t_1; + + /* "View.MemoryView":1110 + * + * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: + * if arg < 0: # <<<<<<<<<<<<<< + * return -arg + * else: + */ + __pyx_t_1 = ((__pyx_v_arg < 0) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1111 + * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: + * if arg < 0: + * return -arg # <<<<<<<<<<<<<< + * else: + * return arg + */ + __pyx_r = (-__pyx_v_arg); + goto __pyx_L0; + + /* "View.MemoryView":1110 + * + * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: + * if arg < 0: # <<<<<<<<<<<<<< + * return -arg + * else: + */ + } + + /* "View.MemoryView":1113 + * return -arg + * else: + * return arg # <<<<<<<<<<<<<< + * + * @cname('__pyx_get_best_slice_order') + */ + /*else*/ { + __pyx_r = __pyx_v_arg; + goto __pyx_L0; + } + + /* "View.MemoryView":1109 + * + * + * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< + * if arg < 0: + * return -arg + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":1116 + * + * @cname('__pyx_get_best_slice_order') + * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< + * """ + * Figure out the best memory access order for a given slice. 
+ */ + +static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { + int __pyx_v_i; + Py_ssize_t __pyx_v_c_stride; + Py_ssize_t __pyx_v_f_stride; + char __pyx_r; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + + /* "View.MemoryView":1121 + * """ + * cdef int i + * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< + * cdef Py_ssize_t f_stride = 0 + * + */ + __pyx_v_c_stride = 0; + + /* "View.MemoryView":1122 + * cdef int i + * cdef Py_ssize_t c_stride = 0 + * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< + * + * for i in range(ndim - 1, -1, -1): + */ + __pyx_v_f_stride = 0; + + /* "View.MemoryView":1124 + * cdef Py_ssize_t f_stride = 0 + * + * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< + * if mslice.shape[i] > 1: + * c_stride = mslice.strides[i] + */ + for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { + __pyx_v_i = __pyx_t_1; + + /* "View.MemoryView":1125 + * + * for i in range(ndim - 1, -1, -1): + * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< + * c_stride = mslice.strides[i] + * break + */ + __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1126 + * for i in range(ndim - 1, -1, -1): + * if mslice.shape[i] > 1: + * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< + * break + * + */ + __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); + + /* "View.MemoryView":1127 + * if mslice.shape[i] > 1: + * c_stride = mslice.strides[i] + * break # <<<<<<<<<<<<<< + * + * for i in range(ndim): + */ + goto __pyx_L4_break; + + /* "View.MemoryView":1125 + * + * for i in range(ndim - 1, -1, -1): + * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< + * c_stride = mslice.strides[i] + * break + */ + } + } + __pyx_L4_break:; + + /* "View.MemoryView":1129 + * break + * + * for i in range(ndim): # <<<<<<<<<<<<<< + * if mslice.shape[i] > 1: + * f_stride = mslice.strides[i] + */ + __pyx_t_1 = __pyx_v_ndim; + __pyx_t_3 = __pyx_t_1; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_v_i = __pyx_t_4; + + /* "View.MemoryView":1130 + * + * for i in range(ndim): + * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< + * f_stride = mslice.strides[i] + * break + */ + __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1131 + * for i in range(ndim): + * if mslice.shape[i] > 1: + * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< + * break + * + */ + __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); + + /* "View.MemoryView":1132 + * if mslice.shape[i] > 1: + * f_stride = mslice.strides[i] + * break # <<<<<<<<<<<<<< + * + * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): + */ + goto __pyx_L7_break; + + /* "View.MemoryView":1130 + * + * for i in range(ndim): + * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< + * f_stride = mslice.strides[i] + * break + */ + } + } + __pyx_L7_break:; + + /* "View.MemoryView":1134 + * break + * + * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< + * return 'C' + * else: + */ + __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1135 + * + * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): + * return 'C' # <<<<<<<<<<<<<< + * else: + * return 'F' + */ + __pyx_r = 'C'; + goto __pyx_L0; + + /* "View.MemoryView":1134 + * break + * + * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< + * return 'C' + * else: + */ + } + + /* "View.MemoryView":1137 + * return 'C' + * else: + 
* return 'F' # <<<<<<<<<<<<<< + * + * @cython.cdivision(True) + */ + /*else*/ { + __pyx_r = 'F'; + goto __pyx_L0; + } + + /* "View.MemoryView":1116 + * + * @cname('__pyx_get_best_slice_order') + * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< + * """ + * Figure out the best memory access order for a given slice. + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":1140 + * + * @cython.cdivision(True) + * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< + * char *dst_data, Py_ssize_t *dst_strides, + * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, + */ + +static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { + CYTHON_UNUSED Py_ssize_t __pyx_v_i; + CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; + Py_ssize_t __pyx_v_dst_extent; + Py_ssize_t __pyx_v_src_stride; + Py_ssize_t __pyx_v_dst_stride; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + Py_ssize_t __pyx_t_4; + Py_ssize_t __pyx_t_5; + Py_ssize_t __pyx_t_6; + + /* "View.MemoryView":1147 + * + * cdef Py_ssize_t i + * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< + * cdef Py_ssize_t dst_extent = dst_shape[0] + * cdef Py_ssize_t src_stride = src_strides[0] + */ + __pyx_v_src_extent = (__pyx_v_src_shape[0]); + + /* "View.MemoryView":1148 + * cdef Py_ssize_t i + * cdef Py_ssize_t src_extent = src_shape[0] + * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< + * cdef Py_ssize_t src_stride = src_strides[0] + * cdef Py_ssize_t dst_stride = dst_strides[0] + */ + __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); + + /* "View.MemoryView":1149 + * cdef Py_ssize_t src_extent = src_shape[0] + * cdef Py_ssize_t dst_extent = dst_shape[0] + * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< + * cdef Py_ssize_t dst_stride = dst_strides[0] + * + */ + __pyx_v_src_stride = (__pyx_v_src_strides[0]); + + /* "View.MemoryView":1150 + * cdef Py_ssize_t dst_extent = dst_shape[0] + * cdef Py_ssize_t src_stride = src_strides[0] + * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< + * + * if ndim == 1: + */ + __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); + + /* "View.MemoryView":1152 + * cdef Py_ssize_t dst_stride = dst_strides[0] + * + * if ndim == 1: # <<<<<<<<<<<<<< + * if (src_stride > 0 and dst_stride > 0 and + * src_stride == itemsize == dst_stride): + */ + __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1153 + * + * if ndim == 1: + * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< + * src_stride == itemsize == dst_stride): + * memcpy(dst_data, src_data, itemsize * dst_extent) + */ + __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L5_bool_binop_done; + } + __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L5_bool_binop_done; + } + + /* "View.MemoryView":1154 + * if ndim == 1: + * if (src_stride > 0 and dst_stride > 0 and + * src_stride == itemsize == dst_stride): # <<<<<<<<<<<<<< + * memcpy(dst_data, src_data, itemsize * dst_extent) + * else: + */ + __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); + if (__pyx_t_2) { + __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); + } + __pyx_t_3 = (__pyx_t_2 != 
0); + __pyx_t_1 = __pyx_t_3; + __pyx_L5_bool_binop_done:; + + /* "View.MemoryView":1153 + * + * if ndim == 1: + * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< + * src_stride == itemsize == dst_stride): + * memcpy(dst_data, src_data, itemsize * dst_extent) + */ + if (__pyx_t_1) { + + /* "View.MemoryView":1155 + * if (src_stride > 0 and dst_stride > 0 and + * src_stride == itemsize == dst_stride): + * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< + * else: + * for i in range(dst_extent): + */ + (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent))); + + /* "View.MemoryView":1153 + * + * if ndim == 1: + * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< + * src_stride == itemsize == dst_stride): + * memcpy(dst_data, src_data, itemsize * dst_extent) + */ + goto __pyx_L4; + } + + /* "View.MemoryView":1157 + * memcpy(dst_data, src_data, itemsize * dst_extent) + * else: + * for i in range(dst_extent): # <<<<<<<<<<<<<< + * memcpy(dst_data, src_data, itemsize) + * src_data += src_stride + */ + /*else*/ { + __pyx_t_4 = __pyx_v_dst_extent; + __pyx_t_5 = __pyx_t_4; + for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { + __pyx_v_i = __pyx_t_6; + + /* "View.MemoryView":1158 + * else: + * for i in range(dst_extent): + * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< + * src_data += src_stride + * dst_data += dst_stride + */ + (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize)); + + /* "View.MemoryView":1159 + * for i in range(dst_extent): + * memcpy(dst_data, src_data, itemsize) + * src_data += src_stride # <<<<<<<<<<<<<< + * dst_data += dst_stride + * else: + */ + __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); + + /* "View.MemoryView":1160 + * memcpy(dst_data, src_data, itemsize) + * src_data += src_stride + * dst_data += dst_stride # <<<<<<<<<<<<<< + * else: + * for i in range(dst_extent): + */ + __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); + } + } + __pyx_L4:; + + /* "View.MemoryView":1152 + * cdef Py_ssize_t dst_stride = dst_strides[0] + * + * if ndim == 1: # <<<<<<<<<<<<<< + * if (src_stride > 0 and dst_stride > 0 and + * src_stride == itemsize == dst_stride): + */ + goto __pyx_L3; + } + + /* "View.MemoryView":1162 + * dst_data += dst_stride + * else: + * for i in range(dst_extent): # <<<<<<<<<<<<<< + * _copy_strided_to_strided(src_data, src_strides + 1, + * dst_data, dst_strides + 1, + */ + /*else*/ { + __pyx_t_4 = __pyx_v_dst_extent; + __pyx_t_5 = __pyx_t_4; + for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { + __pyx_v_i = __pyx_t_6; + + /* "View.MemoryView":1163 + * else: + * for i in range(dst_extent): + * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< + * dst_data, dst_strides + 1, + * src_shape + 1, dst_shape + 1, + */ + _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); + + /* "View.MemoryView":1167 + * src_shape + 1, dst_shape + 1, + * ndim - 1, itemsize) + * src_data += src_stride # <<<<<<<<<<<<<< + * dst_data += dst_stride + * + */ + __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); + + /* "View.MemoryView":1168 + * ndim - 1, itemsize) + * src_data += src_stride + * dst_data += dst_stride # <<<<<<<<<<<<<< + * + * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, + */ + __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); + } + } + __pyx_L3:; + + 
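+ /* Editor's note (illustrative, not generated by Cython): this function is the
+  * recursive core of memoryview assignment. It peels one dimension per call:
+  * in the 1-D base case it issues a single memcpy when both sides are
+  * contiguous (stride == itemsize), otherwise it copies item by item; for
+  * ndim > 1 it recurses over the leading extent. A minimal Python sketch of
+  * the same control flow, assuming a hypothetical `copy_bytes(dst, src, n)`
+  * helper that copies n raw bytes:
+  *
+  *     def copy_strided(src, dst, src_strides, dst_strides, shape, itemsize):
+  *         if len(shape) == 1:
+  *             if src_strides[0] == itemsize == dst_strides[0]:
+  *                 copy_bytes(dst, src, itemsize * shape[0])   # one big block
+  *             else:
+  *                 for i in range(shape[0]):                   # item by item
+  *                     copy_bytes(dst + i * dst_strides[0],
+  *                                src + i * src_strides[0], itemsize)
+  *         else:
+  *             for i in range(shape[0]):                       # peel first dim
+  *                 copy_strided(src + i * src_strides[0],
+  *                              dst + i * dst_strides[0],
+  *                              src_strides[1:], dst_strides[1:],
+  *                              shape[1:], itemsize)
+  */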
/* "View.MemoryView":1140 + * + * @cython.cdivision(True) + * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< + * char *dst_data, Py_ssize_t *dst_strides, + * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, + */ + + /* function exit code */ +} + +/* "View.MemoryView":1170 + * dst_data += dst_stride + * + * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *dst, + * int ndim, size_t itemsize) nogil: + */ + +static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { + + /* "View.MemoryView":1173 + * __Pyx_memviewslice *dst, + * int ndim, size_t itemsize) nogil: + * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< + * src.shape, dst.shape, ndim, itemsize) + * + */ + _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); + + /* "View.MemoryView":1170 + * dst_data += dst_stride + * + * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *dst, + * int ndim, size_t itemsize) nogil: + */ + + /* function exit code */ +} + +/* "View.MemoryView":1177 + * + * @cname('__pyx_memoryview_slice_get_size') + * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< + * "Return the size of the memory occupied by the slice in number of bytes" + * cdef Py_ssize_t shape, size = src.memview.view.itemsize + */ + +static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { + Py_ssize_t __pyx_v_shape; + Py_ssize_t __pyx_v_size; + Py_ssize_t __pyx_r; + Py_ssize_t __pyx_t_1; + Py_ssize_t *__pyx_t_2; + Py_ssize_t *__pyx_t_3; + Py_ssize_t *__pyx_t_4; + + /* "View.MemoryView":1179 + * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: + * "Return the size of the memory occupied by the slice in number of bytes" + * cdef Py_ssize_t shape, size = src.memview.view.itemsize # <<<<<<<<<<<<<< + * + * for shape in src.shape[:ndim]: + */ + __pyx_t_1 = __pyx_v_src->memview->view.itemsize; + __pyx_v_size = __pyx_t_1; + + /* "View.MemoryView":1181 + * cdef Py_ssize_t shape, size = src.memview.view.itemsize + * + * for shape in src.shape[:ndim]: # <<<<<<<<<<<<<< + * size *= shape + * + */ + __pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim); + for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { + __pyx_t_2 = __pyx_t_4; + __pyx_v_shape = (__pyx_t_2[0]); + + /* "View.MemoryView":1182 + * + * for shape in src.shape[:ndim]: + * size *= shape # <<<<<<<<<<<<<< + * + * return size + */ + __pyx_v_size = (__pyx_v_size * __pyx_v_shape); + } + + /* "View.MemoryView":1184 + * size *= shape + * + * return size # <<<<<<<<<<<<<< + * + * @cname('__pyx_fill_contig_strides_array') + */ + __pyx_r = __pyx_v_size; + goto __pyx_L0; + + /* "View.MemoryView":1177 + * + * @cname('__pyx_memoryview_slice_get_size') + * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< + * "Return the size of the memory occupied by the slice in number of bytes" + * cdef Py_ssize_t shape, size = src.memview.view.itemsize + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":1187 + * + * @cname('__pyx_fill_contig_strides_array') + * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< + * 
Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, + * int ndim, char order) nogil: + */ + +static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { + int __pyx_v_idx; + Py_ssize_t __pyx_r; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + + /* "View.MemoryView":1196 + * cdef int idx + * + * if order == 'F': # <<<<<<<<<<<<<< + * for idx in range(ndim): + * strides[idx] = stride + */ + __pyx_t_1 = ((__pyx_v_order == 'F') != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1197 + * + * if order == 'F': + * for idx in range(ndim): # <<<<<<<<<<<<<< + * strides[idx] = stride + * stride *= shape[idx] + */ + __pyx_t_2 = __pyx_v_ndim; + __pyx_t_3 = __pyx_t_2; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_v_idx = __pyx_t_4; + + /* "View.MemoryView":1198 + * if order == 'F': + * for idx in range(ndim): + * strides[idx] = stride # <<<<<<<<<<<<<< + * stride *= shape[idx] + * else: + */ + (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; + + /* "View.MemoryView":1199 + * for idx in range(ndim): + * strides[idx] = stride + * stride *= shape[idx] # <<<<<<<<<<<<<< + * else: + * for idx in range(ndim - 1, -1, -1): + */ + __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); + } + + /* "View.MemoryView":1196 + * cdef int idx + * + * if order == 'F': # <<<<<<<<<<<<<< + * for idx in range(ndim): + * strides[idx] = stride + */ + goto __pyx_L3; + } + + /* "View.MemoryView":1201 + * stride *= shape[idx] + * else: + * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< + * strides[idx] = stride + * stride *= shape[idx] + */ + /*else*/ { + for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) { + __pyx_v_idx = __pyx_t_2; + + /* "View.MemoryView":1202 + * else: + * for idx in range(ndim - 1, -1, -1): + * strides[idx] = stride # <<<<<<<<<<<<<< + * stride *= shape[idx] + * + */ + (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; + + /* "View.MemoryView":1203 + * for idx in range(ndim - 1, -1, -1): + * strides[idx] = stride + * stride *= shape[idx] # <<<<<<<<<<<<<< + * + * return stride + */ + __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); + } + } + __pyx_L3:; + + /* "View.MemoryView":1205 + * stride *= shape[idx] + * + * return stride # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_copy_data_to_temp') + */ + __pyx_r = __pyx_v_stride; + goto __pyx_L0; + + /* "View.MemoryView":1187 + * + * @cname('__pyx_fill_contig_strides_array') + * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< + * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, + * int ndim, char order) nogil: + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":1208 + * + * @cname('__pyx_memoryview_copy_data_to_temp') + * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *tmpslice, + * char order, + */ + +static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { + int __pyx_v_i; + void *__pyx_v_result; + size_t __pyx_v_itemsize; + size_t __pyx_v_size; + void *__pyx_r; + Py_ssize_t __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + struct __pyx_memoryview_obj *__pyx_t_4; + int __pyx_t_5; + int __pyx_t_6; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + + /* "View.MemoryView":1219 + * cdef void *result + * + * cdef 
size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< + * cdef size_t size = slice_get_size(src, ndim) + * + */ + __pyx_t_1 = __pyx_v_src->memview->view.itemsize; + __pyx_v_itemsize = __pyx_t_1; + + /* "View.MemoryView":1220 + * + * cdef size_t itemsize = src.memview.view.itemsize + * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< + * + * result = malloc(size) + */ + __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); + + /* "View.MemoryView":1222 + * cdef size_t size = slice_get_size(src, ndim) + * + * result = malloc(size) # <<<<<<<<<<<<<< + * if not result: + * _err(MemoryError, NULL) + */ + __pyx_v_result = malloc(__pyx_v_size); + + /* "View.MemoryView":1223 + * + * result = malloc(size) + * if not result: # <<<<<<<<<<<<<< + * _err(MemoryError, NULL) + * + */ + __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1224 + * result = malloc(size) + * if not result: + * _err(MemoryError, NULL) # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1224, __pyx_L1_error) + + /* "View.MemoryView":1223 + * + * result = malloc(size) + * if not result: # <<<<<<<<<<<<<< + * _err(MemoryError, NULL) + * + */ + } + + /* "View.MemoryView":1227 + * + * + * tmpslice.data = result # <<<<<<<<<<<<<< + * tmpslice.memview = src.memview + * for i in range(ndim): + */ + __pyx_v_tmpslice->data = ((char *)__pyx_v_result); + + /* "View.MemoryView":1228 + * + * tmpslice.data = result + * tmpslice.memview = src.memview # <<<<<<<<<<<<<< + * for i in range(ndim): + * tmpslice.shape[i] = src.shape[i] + */ + __pyx_t_4 = __pyx_v_src->memview; + __pyx_v_tmpslice->memview = __pyx_t_4; + + /* "View.MemoryView":1229 + * tmpslice.data = result + * tmpslice.memview = src.memview + * for i in range(ndim): # <<<<<<<<<<<<<< + * tmpslice.shape[i] = src.shape[i] + * tmpslice.suboffsets[i] = -1 + */ + __pyx_t_3 = __pyx_v_ndim; + __pyx_t_5 = __pyx_t_3; + for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { + __pyx_v_i = __pyx_t_6; + + /* "View.MemoryView":1230 + * tmpslice.memview = src.memview + * for i in range(ndim): + * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< + * tmpslice.suboffsets[i] = -1 + * + */ + (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); + + /* "View.MemoryView":1231 + * for i in range(ndim): + * tmpslice.shape[i] = src.shape[i] + * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< + * + * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, + */ + (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L; + } + + /* "View.MemoryView":1233 + * tmpslice.suboffsets[i] = -1 + * + * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<< + * ndim, order) + * + */ + (void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order)); + + /* "View.MemoryView":1237 + * + * + * for i in range(ndim): # <<<<<<<<<<<<<< + * if tmpslice.shape[i] == 1: + * tmpslice.strides[i] = 0 + */ + __pyx_t_3 = __pyx_v_ndim; + __pyx_t_5 = __pyx_t_3; + for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { + __pyx_v_i = __pyx_t_6; + + /* "View.MemoryView":1238 + * + * for i in range(ndim): + * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< + * tmpslice.strides[i] = 0 + * + */ + __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1239 + * for i 
in range(ndim): + * if tmpslice.shape[i] == 1: + * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< + * + * if slice_is_contig(src[0], order, ndim): + */ + (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; + + /* "View.MemoryView":1238 + * + * for i in range(ndim): + * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< + * tmpslice.strides[i] = 0 + * + */ + } + } + + /* "View.MemoryView":1241 + * tmpslice.strides[i] = 0 + * + * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< + * memcpy(result, src.data, size) + * else: + */ + __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1242 + * + * if slice_is_contig(src[0], order, ndim): + * memcpy(result, src.data, size) # <<<<<<<<<<<<<< + * else: + * copy_strided_to_strided(src, tmpslice, ndim, itemsize) + */ + (void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size)); + + /* "View.MemoryView":1241 + * tmpslice.strides[i] = 0 + * + * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< + * memcpy(result, src.data, size) + * else: + */ + goto __pyx_L9; + } + + /* "View.MemoryView":1244 + * memcpy(result, src.data, size) + * else: + * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< + * + * return result + */ + /*else*/ { + copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); + } + __pyx_L9:; + + /* "View.MemoryView":1246 + * copy_strided_to_strided(src, tmpslice, ndim, itemsize) + * + * return result # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = __pyx_v_result; + goto __pyx_L0; + + /* "View.MemoryView":1208 + * + * @cname('__pyx_memoryview_copy_data_to_temp') + * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *tmpslice, + * char order, + */ + + /* function exit code */ + __pyx_L1_error:; + { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + } + __pyx_r = NULL; + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":1251 + * + * @cname('__pyx_memoryview_err_extents') + * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< + * Py_ssize_t extent2) except -1 with gil: + * raise ValueError("got differing extents in dimension %d (got %d and %d)" % + */ + +static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_RefNannySetupContext("_err_extents", 0); + + /* "View.MemoryView":1254 + * Py_ssize_t extent2) except -1 with gil: + * raise ValueError("got differing extents in dimension %d (got %d and %d)" % + * (i, extent1, extent2)) # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_err_dim') + */ + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1254, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(1, 1254, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1254, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_2); + PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); + __pyx_t_1 = 0; + __pyx_t_2 = 0; + __pyx_t_3 = 0; + + /* "View.MemoryView":1253 + * cdef int _err_extents(int i, Py_ssize_t extent1, + * Py_ssize_t extent2) except -1 with gil: + * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< + * (i, extent1, extent2)) + * + */ + __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1253, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1253, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 1253, __pyx_L1_error) + + /* "View.MemoryView":1251 + * + * @cname('__pyx_memoryview_err_extents') + * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< + * Py_ssize_t extent2) except -1 with gil: + * raise ValueError("got differing extents in dimension %d (got %d and %d)" % + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __Pyx_RefNannyFinishContext(); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + return __pyx_r; +} + +/* "View.MemoryView":1257 + * + * @cname('__pyx_memoryview_err_dim') + * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< + * raise error(msg.decode('ascii') % dim) + * + */ + +static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_RefNannySetupContext("_err_dim", 0); + __Pyx_INCREF(__pyx_v_error); + + /* "View.MemoryView":1258 + * @cname('__pyx_memoryview_err_dim') + * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: + * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_err') + */ + __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1258, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1258, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1258, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_INCREF(__pyx_v_error); + __pyx_t_3 
= __pyx_v_error; __pyx_t_2 = NULL; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { + __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); + if (likely(__pyx_t_2)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); + __Pyx_INCREF(__pyx_t_2); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_3, function); + } + } + __pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1258, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 1258, __pyx_L1_error) + + /* "View.MemoryView":1257 + * + * @cname('__pyx_memoryview_err_dim') + * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< + * raise error(msg.decode('ascii') % dim) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __Pyx_XDECREF(__pyx_v_error); + __Pyx_RefNannyFinishContext(); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + return __pyx_r; +} + +/* "View.MemoryView":1261 + * + * @cname('__pyx_memoryview_err') + * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< + * if msg != NULL: + * raise error(msg.decode('ascii')) + */ + +static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_RefNannySetupContext("_err", 0); + __Pyx_INCREF(__pyx_v_error); + + /* "View.MemoryView":1262 + * @cname('__pyx_memoryview_err') + * cdef int _err(object error, char *msg) except -1 with gil: + * if msg != NULL: # <<<<<<<<<<<<<< + * raise error(msg.decode('ascii')) + * else: + */ + __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":1263 + * cdef int _err(object error, char *msg) except -1 with gil: + * if msg != NULL: + * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< + * else: + * raise error + */ + __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1263, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(__pyx_v_error); + __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { + __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); + if (likely(__pyx_t_5)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); + __Pyx_INCREF(__pyx_t_5); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_4, function); + } + } + __pyx_t_2 = (__pyx_t_5) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1263, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(1, 1263, __pyx_L1_error) + + /* "View.MemoryView":1262 + * @cname('__pyx_memoryview_err') + * cdef int _err(object error, char *msg) except -1 with gil: + * if msg != NULL: # <<<<<<<<<<<<<< + * raise error(msg.decode('ascii')) + * else: + */ + } + + /* "View.MemoryView":1265 + * raise error(msg.decode('ascii')) + * else: + * raise error # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_copy_contents') + */ + /*else*/ { + __Pyx_Raise(__pyx_v_error, 0, 0, 0); + __PYX_ERR(1, 1265, __pyx_L1_error) + } + + /* "View.MemoryView":1261 + * + * @cname('__pyx_memoryview_err') + * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< + * if msg != NULL: + * raise error(msg.decode('ascii')) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __Pyx_XDECREF(__pyx_v_error); + __Pyx_RefNannyFinishContext(); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + return __pyx_r; +} + +/* "View.MemoryView":1268 + * + * @cname('__pyx_memoryview_copy_contents') + * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice dst, + * int src_ndim, int dst_ndim, + */ + +static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { + void *__pyx_v_tmpdata; + size_t __pyx_v_itemsize; + int __pyx_v_i; + char __pyx_v_order; + int __pyx_v_broadcasting; + int __pyx_v_direct_copy; + __Pyx_memviewslice __pyx_v_tmp; + int __pyx_v_ndim; + int __pyx_r; + Py_ssize_t __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + int __pyx_t_5; + int __pyx_t_6; + void *__pyx_t_7; + int __pyx_t_8; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + + /* "View.MemoryView":1276 + * Check for overlapping memory and verify the shapes. 
+ * """ + * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< + * cdef size_t itemsize = src.memview.view.itemsize + * cdef int i + */ + __pyx_v_tmpdata = NULL; + + /* "View.MemoryView":1277 + * """ + * cdef void *tmpdata = NULL + * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< + * cdef int i + * cdef char order = get_best_order(&src, src_ndim) + */ + __pyx_t_1 = __pyx_v_src.memview->view.itemsize; + __pyx_v_itemsize = __pyx_t_1; + + /* "View.MemoryView":1279 + * cdef size_t itemsize = src.memview.view.itemsize + * cdef int i + * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< + * cdef bint broadcasting = False + * cdef bint direct_copy = False + */ + __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); + + /* "View.MemoryView":1280 + * cdef int i + * cdef char order = get_best_order(&src, src_ndim) + * cdef bint broadcasting = False # <<<<<<<<<<<<<< + * cdef bint direct_copy = False + * cdef __Pyx_memviewslice tmp + */ + __pyx_v_broadcasting = 0; + + /* "View.MemoryView":1281 + * cdef char order = get_best_order(&src, src_ndim) + * cdef bint broadcasting = False + * cdef bint direct_copy = False # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice tmp + * + */ + __pyx_v_direct_copy = 0; + + /* "View.MemoryView":1284 + * cdef __Pyx_memviewslice tmp + * + * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< + * broadcast_leading(&src, src_ndim, dst_ndim) + * elif dst_ndim < src_ndim: + */ + __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1285 + * + * if src_ndim < dst_ndim: + * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< + * elif dst_ndim < src_ndim: + * broadcast_leading(&dst, dst_ndim, src_ndim) + */ + __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); + + /* "View.MemoryView":1284 + * cdef __Pyx_memviewslice tmp + * + * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< + * broadcast_leading(&src, src_ndim, dst_ndim) + * elif dst_ndim < src_ndim: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":1286 + * if src_ndim < dst_ndim: + * broadcast_leading(&src, src_ndim, dst_ndim) + * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< + * broadcast_leading(&dst, dst_ndim, src_ndim) + * + */ + __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1287 + * broadcast_leading(&src, src_ndim, dst_ndim) + * elif dst_ndim < src_ndim: + * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< + * + * cdef int ndim = max(src_ndim, dst_ndim) + */ + __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); + + /* "View.MemoryView":1286 + * if src_ndim < dst_ndim: + * broadcast_leading(&src, src_ndim, dst_ndim) + * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< + * broadcast_leading(&dst, dst_ndim, src_ndim) + * + */ + } + __pyx_L3:; + + /* "View.MemoryView":1289 + * broadcast_leading(&dst, dst_ndim, src_ndim) + * + * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< + * + * for i in range(ndim): + */ + __pyx_t_3 = __pyx_v_dst_ndim; + __pyx_t_4 = __pyx_v_src_ndim; + if (((__pyx_t_3 > __pyx_t_4) != 0)) { + __pyx_t_5 = __pyx_t_3; + } else { + __pyx_t_5 = __pyx_t_4; + } + __pyx_v_ndim = __pyx_t_5; + + /* "View.MemoryView":1291 + * cdef int ndim = max(src_ndim, dst_ndim) + * + * for i in range(ndim): # <<<<<<<<<<<<<< + * if src.shape[i] != dst.shape[i]: + * if src.shape[i] == 1: + */ + __pyx_t_5 = __pyx_v_ndim; + __pyx_t_3 = __pyx_t_5; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + 
__pyx_v_i = __pyx_t_4; + + /* "View.MemoryView":1292 + * + * for i in range(ndim): + * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< + * if src.shape[i] == 1: + * broadcasting = True + */ + __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1293 + * for i in range(ndim): + * if src.shape[i] != dst.shape[i]: + * if src.shape[i] == 1: # <<<<<<<<<<<<<< + * broadcasting = True + * src.strides[i] = 0 + */ + __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1294 + * if src.shape[i] != dst.shape[i]: + * if src.shape[i] == 1: + * broadcasting = True # <<<<<<<<<<<<<< + * src.strides[i] = 0 + * else: + */ + __pyx_v_broadcasting = 1; + + /* "View.MemoryView":1295 + * if src.shape[i] == 1: + * broadcasting = True + * src.strides[i] = 0 # <<<<<<<<<<<<<< + * else: + * _err_extents(i, dst.shape[i], src.shape[i]) + */ + (__pyx_v_src.strides[__pyx_v_i]) = 0; + + /* "View.MemoryView":1293 + * for i in range(ndim): + * if src.shape[i] != dst.shape[i]: + * if src.shape[i] == 1: # <<<<<<<<<<<<<< + * broadcasting = True + * src.strides[i] = 0 + */ + goto __pyx_L7; + } + + /* "View.MemoryView":1297 + * src.strides[i] = 0 + * else: + * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< + * + * if src.suboffsets[i] >= 0: + */ + /*else*/ { + __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error) + } + __pyx_L7:; + + /* "View.MemoryView":1292 + * + * for i in range(ndim): + * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< + * if src.shape[i] == 1: + * broadcasting = True + */ + } + + /* "View.MemoryView":1299 + * _err_extents(i, dst.shape[i], src.shape[i]) + * + * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< + * _err_dim(ValueError, "Dimension %d is not direct", i) + * + */ + __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1300 + * + * if src.suboffsets[i] >= 0: + * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< + * + * if slices_overlap(&src, &dst, ndim, itemsize): + */ + __pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1300, __pyx_L1_error) + + /* "View.MemoryView":1299 + * _err_extents(i, dst.shape[i], src.shape[i]) + * + * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< + * _err_dim(ValueError, "Dimension %d is not direct", i) + * + */ + } + } + + /* "View.MemoryView":1302 + * _err_dim(ValueError, "Dimension %d is not direct", i) + * + * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< + * + * if not slice_is_contig(src, order, ndim): + */ + __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1304 + * if slices_overlap(&src, &dst, ndim, itemsize): + * + * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< + * order = get_best_order(&dst, ndim) + * + */ + __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1305 + * + * if not slice_is_contig(src, order, ndim): + * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< + * + * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) + */ + __pyx_v_order = 
__pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); + + /* "View.MemoryView":1304 + * if slices_overlap(&src, &dst, ndim, itemsize): + * + * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< + * order = get_best_order(&dst, ndim) + * + */ + } + + /* "View.MemoryView":1307 + * order = get_best_order(&dst, ndim) + * + * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< + * src = tmp + * + */ + __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(1, 1307, __pyx_L1_error) + __pyx_v_tmpdata = __pyx_t_7; + + /* "View.MemoryView":1308 + * + * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) + * src = tmp # <<<<<<<<<<<<<< + * + * if not broadcasting: + */ + __pyx_v_src = __pyx_v_tmp; + + /* "View.MemoryView":1302 + * _err_dim(ValueError, "Dimension %d is not direct", i) + * + * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< + * + * if not slice_is_contig(src, order, ndim): + */ + } + + /* "View.MemoryView":1310 + * src = tmp + * + * if not broadcasting: # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1313 + * + * + * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< + * direct_copy = slice_is_contig(dst, 'C', ndim) + * elif slice_is_contig(src, 'F', ndim): + */ + __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1314 + * + * if slice_is_contig(src, 'C', ndim): + * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< + * elif slice_is_contig(src, 'F', ndim): + * direct_copy = slice_is_contig(dst, 'F', ndim) + */ + __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); + + /* "View.MemoryView":1313 + * + * + * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< + * direct_copy = slice_is_contig(dst, 'C', ndim) + * elif slice_is_contig(src, 'F', ndim): + */ + goto __pyx_L12; + } + + /* "View.MemoryView":1315 + * if slice_is_contig(src, 'C', ndim): + * direct_copy = slice_is_contig(dst, 'C', ndim) + * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< + * direct_copy = slice_is_contig(dst, 'F', ndim) + * + */ + __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1316 + * direct_copy = slice_is_contig(dst, 'C', ndim) + * elif slice_is_contig(src, 'F', ndim): + * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< + * + * if direct_copy: + */ + __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); + + /* "View.MemoryView":1315 + * if slice_is_contig(src, 'C', ndim): + * direct_copy = slice_is_contig(dst, 'C', ndim) + * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< + * direct_copy = slice_is_contig(dst, 'F', ndim) + * + */ + } + __pyx_L12:; + + /* "View.MemoryView":1318 + * direct_copy = slice_is_contig(dst, 'F', ndim) + * + * if direct_copy: # <<<<<<<<<<<<<< + * + * refcount_copying(&dst, dtype_is_object, ndim, False) + */ + __pyx_t_2 = (__pyx_v_direct_copy != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1320 + * if direct_copy: + * + * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< + * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) + * refcount_copying(&dst, dtype_is_object, ndim, True) + */ + __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); + + 
/* "View.MemoryView":1321 + * + * refcount_copying(&dst, dtype_is_object, ndim, False) + * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< + * refcount_copying(&dst, dtype_is_object, ndim, True) + * free(tmpdata) + */ + (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim))); + + /* "View.MemoryView":1322 + * refcount_copying(&dst, dtype_is_object, ndim, False) + * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) + * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< + * free(tmpdata) + * return 0 + */ + __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); + + /* "View.MemoryView":1323 + * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) + * refcount_copying(&dst, dtype_is_object, ndim, True) + * free(tmpdata) # <<<<<<<<<<<<<< + * return 0 + * + */ + free(__pyx_v_tmpdata); + + /* "View.MemoryView":1324 + * refcount_copying(&dst, dtype_is_object, ndim, True) + * free(tmpdata) + * return 0 # <<<<<<<<<<<<<< + * + * if order == 'F' == get_best_order(&dst, ndim): + */ + __pyx_r = 0; + goto __pyx_L0; + + /* "View.MemoryView":1318 + * direct_copy = slice_is_contig(dst, 'F', ndim) + * + * if direct_copy: # <<<<<<<<<<<<<< + * + * refcount_copying(&dst, dtype_is_object, ndim, False) + */ + } + + /* "View.MemoryView":1310 + * src = tmp + * + * if not broadcasting: # <<<<<<<<<<<<<< + * + * + */ + } + + /* "View.MemoryView":1326 + * return 0 + * + * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_2 = (__pyx_v_order == 'F'); + if (__pyx_t_2) { + __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); + } + __pyx_t_8 = (__pyx_t_2 != 0); + if (__pyx_t_8) { + + /* "View.MemoryView":1329 + * + * + * transpose_memslice(&src) # <<<<<<<<<<<<<< + * transpose_memslice(&dst) + * + */ + __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1329, __pyx_L1_error) + + /* "View.MemoryView":1330 + * + * transpose_memslice(&src) + * transpose_memslice(&dst) # <<<<<<<<<<<<<< + * + * refcount_copying(&dst, dtype_is_object, ndim, False) + */ + __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1330, __pyx_L1_error) + + /* "View.MemoryView":1326 + * return 0 + * + * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< + * + * + */ + } + + /* "View.MemoryView":1332 + * transpose_memslice(&dst) + * + * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< + * copy_strided_to_strided(&src, &dst, ndim, itemsize) + * refcount_copying(&dst, dtype_is_object, ndim, True) + */ + __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); + + /* "View.MemoryView":1333 + * + * refcount_copying(&dst, dtype_is_object, ndim, False) + * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< + * refcount_copying(&dst, dtype_is_object, ndim, True) + * + */ + copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); + + /* "View.MemoryView":1334 + * refcount_copying(&dst, dtype_is_object, ndim, False) + * copy_strided_to_strided(&src, &dst, ndim, itemsize) + * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< + * + * free(tmpdata) + */ + __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); + + /* "View.MemoryView":1336 + * refcount_copying(&dst, dtype_is_object, ndim, 
True) + * + * free(tmpdata) # <<<<<<<<<<<<<< + * return 0 + * + */ + free(__pyx_v_tmpdata); + + /* "View.MemoryView":1337 + * + * free(tmpdata) + * return 0 # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_broadcast_leading') + */ + __pyx_r = 0; + goto __pyx_L0; + + /* "View.MemoryView":1268 + * + * @cname('__pyx_memoryview_copy_contents') + * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice dst, + * int src_ndim, int dst_ndim, + */ + + /* function exit code */ + __pyx_L1_error:; + { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + } + __pyx_r = -1; + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":1340 + * + * @cname('__pyx_memoryview_broadcast_leading') + * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< + * int ndim, + * int ndim_other) nogil: + */ + +static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { + int __pyx_v_i; + int __pyx_v_offset; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + + /* "View.MemoryView":1344 + * int ndim_other) nogil: + * cdef int i + * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< + * + * for i in range(ndim - 1, -1, -1): + */ + __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); + + /* "View.MemoryView":1346 + * cdef int offset = ndim_other - ndim + * + * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< + * mslice.shape[i + offset] = mslice.shape[i] + * mslice.strides[i + offset] = mslice.strides[i] + */ + for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { + __pyx_v_i = __pyx_t_1; + + /* "View.MemoryView":1347 + * + * for i in range(ndim - 1, -1, -1): + * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< + * mslice.strides[i + offset] = mslice.strides[i] + * mslice.suboffsets[i + offset] = mslice.suboffsets[i] + */ + (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); + + /* "View.MemoryView":1348 + * for i in range(ndim - 1, -1, -1): + * mslice.shape[i + offset] = mslice.shape[i] + * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< + * mslice.suboffsets[i + offset] = mslice.suboffsets[i] + * + */ + (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); + + /* "View.MemoryView":1349 + * mslice.shape[i + offset] = mslice.shape[i] + * mslice.strides[i + offset] = mslice.strides[i] + * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< + * + * for i in range(offset): + */ + (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); + } + + /* "View.MemoryView":1351 + * mslice.suboffsets[i + offset] = mslice.suboffsets[i] + * + * for i in range(offset): # <<<<<<<<<<<<<< + * mslice.shape[i] = 1 + * mslice.strides[i] = mslice.strides[0] + */ + __pyx_t_1 = __pyx_v_offset; + __pyx_t_2 = __pyx_t_1; + for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { + __pyx_v_i = __pyx_t_3; + + /* "View.MemoryView":1352 + * + * for i in range(offset): + * mslice.shape[i] = 1 # <<<<<<<<<<<<<< + * mslice.strides[i] = mslice.strides[0] + * mslice.suboffsets[i] = -1 + */ + (__pyx_v_mslice->shape[__pyx_v_i]) = 1; + + /* "View.MemoryView":1353 + * for i in range(offset): + * mslice.shape[i] = 1 
+ * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< + * mslice.suboffsets[i] = -1 + * + */ + (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); + + /* "View.MemoryView":1354 + * mslice.shape[i] = 1 + * mslice.strides[i] = mslice.strides[0] + * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< + * + * + */ + (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; + } + + /* "View.MemoryView":1340 + * + * @cname('__pyx_memoryview_broadcast_leading') + * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< + * int ndim, + * int ndim_other) nogil: + */ + + /* function exit code */ +} + +/* "View.MemoryView":1362 + * + * @cname('__pyx_memoryview_refcount_copying') + * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< + * int ndim, bint inc) nogil: + * + */ + +static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { + int __pyx_t_1; + + /* "View.MemoryView":1366 + * + * + * if dtype_is_object: # <<<<<<<<<<<<<< + * refcount_objects_in_slice_with_gil(dst.data, dst.shape, + * dst.strides, ndim, inc) + */ + __pyx_t_1 = (__pyx_v_dtype_is_object != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1367 + * + * if dtype_is_object: + * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<< + * dst.strides, ndim, inc) + * + */ + __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); + + /* "View.MemoryView":1366 + * + * + * if dtype_is_object: # <<<<<<<<<<<<<< + * refcount_objects_in_slice_with_gil(dst.data, dst.shape, + * dst.strides, ndim, inc) + */ + } + + /* "View.MemoryView":1362 + * + * @cname('__pyx_memoryview_refcount_copying') + * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< + * int ndim, bint inc) nogil: + * + */ + + /* function exit code */ +} + +/* "View.MemoryView":1371 + * + * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') + * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< + * Py_ssize_t *strides, int ndim, + * bint inc) with gil: + */ + +static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { + __Pyx_RefNannyDeclarations + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); + + /* "View.MemoryView":1374 + * Py_ssize_t *strides, int ndim, + * bint inc) with gil: + * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_refcount_objects_in_slice') + */ + __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); + + /* "View.MemoryView":1371 + * + * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') + * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< + * Py_ssize_t *strides, int ndim, + * bint inc) with gil: + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif +} + +/* "View.MemoryView":1377 + * + * @cname('__pyx_memoryview_refcount_objects_in_slice') + * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< + * 
Py_ssize_t *strides, int ndim, bint inc): + * cdef Py_ssize_t i + */ + +static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { + CYTHON_UNUSED Py_ssize_t __pyx_v_i; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + Py_ssize_t __pyx_t_2; + Py_ssize_t __pyx_t_3; + int __pyx_t_4; + __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); + + /* "View.MemoryView":1381 + * cdef Py_ssize_t i + * + * for i in range(shape[0]): # <<<<<<<<<<<<<< + * if ndim == 1: + * if inc: + */ + __pyx_t_1 = (__pyx_v_shape[0]); + __pyx_t_2 = __pyx_t_1; + for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { + __pyx_v_i = __pyx_t_3; + + /* "View.MemoryView":1382 + * + * for i in range(shape[0]): + * if ndim == 1: # <<<<<<<<<<<<<< + * if inc: + * Py_INCREF((<PyObject **> data)[0]) + */ + __pyx_t_4 = ((__pyx_v_ndim == 1) != 0); + if (__pyx_t_4) { + + /* "View.MemoryView":1383 + * for i in range(shape[0]): + * if ndim == 1: + * if inc: # <<<<<<<<<<<<<< + * Py_INCREF((<PyObject **> data)[0]) + * else: + */ + __pyx_t_4 = (__pyx_v_inc != 0); + if (__pyx_t_4) { + + /* "View.MemoryView":1384 + * if ndim == 1: + * if inc: + * Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< + * else: + * Py_DECREF((<PyObject **> data)[0]) + */ + Py_INCREF((((PyObject **)__pyx_v_data)[0])); + + /* "View.MemoryView":1383 + * for i in range(shape[0]): + * if ndim == 1: + * if inc: # <<<<<<<<<<<<<< + * Py_INCREF((<PyObject **> data)[0]) + * else: + */ + goto __pyx_L6; + } + + /* "View.MemoryView":1386 + * Py_INCREF((<PyObject **> data)[0]) + * else: + * Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< + * else: + * refcount_objects_in_slice(data, shape + 1, strides + 1, + */ + /*else*/ { + Py_DECREF((((PyObject **)__pyx_v_data)[0])); + } + __pyx_L6:; + + /* "View.MemoryView":1382 + * + * for i in range(shape[0]): + * if ndim == 1: # <<<<<<<<<<<<<< + * if inc: + * Py_INCREF((<PyObject **> data)[0]) + */ + goto __pyx_L5; + } + + /* "View.MemoryView":1388 + * Py_DECREF((<PyObject **> data)[0]) + * else: + * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< + * ndim - 1, inc) + * + */ + /*else*/ { + + /* "View.MemoryView":1389 + * else: + * refcount_objects_in_slice(data, shape + 1, strides + 1, + * ndim - 1, inc) # <<<<<<<<<<<<<< + * + * data += strides[0] + */ + __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); + } + __pyx_L5:; + + /* "View.MemoryView":1391 + * ndim - 1, inc) + * + * data += strides[0] # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); + } + + /* "View.MemoryView":1377 + * + * @cname('__pyx_memoryview_refcount_objects_in_slice') + * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< + * Py_ssize_t *strides, int ndim, bint inc): + * cdef Py_ssize_t i + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "View.MemoryView":1397 + * + * @cname('__pyx_memoryview_slice_assign_scalar') + * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< + * size_t itemsize, void *item, + * bint dtype_is_object) nogil: + */ + +static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { + + /* "View.MemoryView":1400 + * size_t itemsize, void *item, + * bint dtype_is_object) nogil: + * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< + * _slice_assign_scalar(dst.data, dst.shape, 
dst.strides, ndim, + * itemsize, item) + */ + __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); + + /* "View.MemoryView":1401 + * bint dtype_is_object) nogil: + * refcount_copying(dst, dtype_is_object, ndim, False) + * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< + * itemsize, item) + * refcount_copying(dst, dtype_is_object, ndim, True) + */ + __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); + + /* "View.MemoryView":1403 + * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, + * itemsize, item) + * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< + * + * + */ + __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); + + /* "View.MemoryView":1397 + * + * @cname('__pyx_memoryview_slice_assign_scalar') + * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< + * size_t itemsize, void *item, + * bint dtype_is_object) nogil: + */ + + /* function exit code */ +} + +/* "View.MemoryView":1407 + * + * @cname('__pyx_memoryview__slice_assign_scalar') + * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< + * Py_ssize_t *strides, int ndim, + * size_t itemsize, void *item) nogil: + */ + +static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { + CYTHON_UNUSED Py_ssize_t __pyx_v_i; + Py_ssize_t __pyx_v_stride; + Py_ssize_t __pyx_v_extent; + int __pyx_t_1; + Py_ssize_t __pyx_t_2; + Py_ssize_t __pyx_t_3; + Py_ssize_t __pyx_t_4; + + /* "View.MemoryView":1411 + * size_t itemsize, void *item) nogil: + * cdef Py_ssize_t i + * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< + * cdef Py_ssize_t extent = shape[0] + * + */ + __pyx_v_stride = (__pyx_v_strides[0]); + + /* "View.MemoryView":1412 + * cdef Py_ssize_t i + * cdef Py_ssize_t stride = strides[0] + * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< + * + * if ndim == 1: + */ + __pyx_v_extent = (__pyx_v_shape[0]); + + /* "View.MemoryView":1414 + * cdef Py_ssize_t extent = shape[0] + * + * if ndim == 1: # <<<<<<<<<<<<<< + * for i in range(extent): + * memcpy(data, item, itemsize) + */ + __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1415 + * + * if ndim == 1: + * for i in range(extent): # <<<<<<<<<<<<<< + * memcpy(data, item, itemsize) + * data += stride + */ + __pyx_t_2 = __pyx_v_extent; + __pyx_t_3 = __pyx_t_2; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_v_i = __pyx_t_4; + + /* "View.MemoryView":1416 + * if ndim == 1: + * for i in range(extent): + * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< + * data += stride + * else: + */ + (void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize)); + + /* "View.MemoryView":1417 + * for i in range(extent): + * memcpy(data, item, itemsize) + * data += stride # <<<<<<<<<<<<<< + * else: + * for i in range(extent): + */ + __pyx_v_data = (__pyx_v_data + __pyx_v_stride); + } + + /* "View.MemoryView":1414 + * cdef Py_ssize_t extent = shape[0] + * + * if ndim == 1: # <<<<<<<<<<<<<< + * for i in range(extent): + * memcpy(data, item, itemsize) + */ + goto __pyx_L3; + } + + /* "View.MemoryView":1419 + * data += stride + * else: + * for i in range(extent): # <<<<<<<<<<<<<< + * _slice_assign_scalar(data, shape + 1, strides + 1, + * ndim - 1, 
itemsize, item) + */ + /*else*/ { + __pyx_t_2 = __pyx_v_extent; + __pyx_t_3 = __pyx_t_2; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_v_i = __pyx_t_4; + + /* "View.MemoryView":1420 + * else: + * for i in range(extent): + * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< + * ndim - 1, itemsize, item) + * data += stride + */ + __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); + + /* "View.MemoryView":1422 + * _slice_assign_scalar(data, shape + 1, strides + 1, + * ndim - 1, itemsize, item) + * data += stride # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_data = (__pyx_v_data + __pyx_v_stride); + } + } + __pyx_L3:; + + /* "View.MemoryView":1407 + * + * @cname('__pyx_memoryview__slice_assign_scalar') + * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< + * Py_ssize_t *strides, int ndim, + * size_t itemsize, void *item) nogil: + */ + + /* function exit code */ +} + +/* "(tree fragment)":1 + * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< + * cdef object __pyx_PickleError + * cdef object __pyx_result + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v___pyx_type = 0; + long __pyx_v___pyx_checksum; + PyObject *__pyx_v___pyx_state = 0; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0}; + PyObject* values[3] = {0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error) + } + } else if 
(PyTuple_GET_SIZE(__pyx_args) != 3) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + } + __pyx_v___pyx_type = values[0]; + __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) + __pyx_v___pyx_state = values[2]; + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_v___pyx_PickleError = 0; + PyObject *__pyx_v___pyx_result = 0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + int __pyx_t_6; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0); + + /* "(tree fragment)":4 + * cdef object __pyx_PickleError + * cdef object __pyx_result + * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< + * from pickle import PickleError as __pyx_PickleError + * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) + */ + __pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0); + if (__pyx_t_1) { + + /* "(tree fragment)":5 + * cdef object __pyx_result + * if __pyx_checksum != 0xb068931: + * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< + * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) + * __pyx_result = Enum.__new__(__pyx_type) + */ + __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_INCREF(__pyx_n_s_PickleError); + __Pyx_GIVEREF(__pyx_n_s_PickleError); + PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError); + __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_INCREF(__pyx_t_2); + __pyx_v___pyx_PickleError = __pyx_t_2; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "(tree fragment)":6 + * if __pyx_checksum != 0xb068931: + * from pickle import PickleError as __pyx_PickleError + * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<< + * __pyx_result = Enum.__new__(__pyx_type) + * if __pyx_state is not None: + */ + __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_INCREF(__pyx_v___pyx_PickleError); + __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { + __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); + if (likely(__pyx_t_5)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); + __Pyx_INCREF(__pyx_t_5); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_2, function); + } + } + __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 6, __pyx_L1_error) + + /* "(tree fragment)":4 + * cdef object __pyx_PickleError + * cdef object __pyx_result + * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< + * from pickle import PickleError as __pyx_PickleError + * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) + */ + } + + /* "(tree fragment)":7 + * from pickle import PickleError as __pyx_PickleError + * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) + * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<< + * if __pyx_state is not None: + * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { + __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); + if (likely(__pyx_t_4)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); + __Pyx_INCREF(__pyx_t_4); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_2, function); + } + } + __pyx_t_3 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type); + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_v___pyx_result = __pyx_t_3; + __pyx_t_3 = 0; + + /* "(tree fragment)":8 + * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) + * __pyx_result = Enum.__new__(__pyx_type) + * if __pyx_state is not None: # <<<<<<<<<<<<<< + * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) + * return __pyx_result + */ + __pyx_t_1 = (__pyx_v___pyx_state != Py_None); + __pyx_t_6 = (__pyx_t_1 != 0); + if (__pyx_t_6) { + + /* "(tree fragment)":9 + * __pyx_result = Enum.__new__(__pyx_type) + * if __pyx_state is not None: + * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<< + * return __pyx_result + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): + */ + if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error) + __pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "(tree fragment)":8 + * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) + * __pyx_result = Enum.__new__(__pyx_type) + * if __pyx_state is not None: # <<<<<<<<<<<<<< + * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) + * return __pyx_result + */ + } + + /* "(tree fragment)":10 + * if __pyx_state is not None: + * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) + * return __pyx_result # <<<<<<<<<<<<<< + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): + * __pyx_result.name = __pyx_state[0] + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v___pyx_result); + __pyx_r = __pyx_v___pyx_result; + goto __pyx_L0; + + /* "(tree fragment)":1 + * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< + * cdef object __pyx_PickleError + * cdef object __pyx_result + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v___pyx_PickleError); + __Pyx_XDECREF(__pyx_v___pyx_result); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":11 + * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) + * return __pyx_result + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< + * __pyx_result.name = __pyx_state[0] + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): + */ + +static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_t_2; + Py_ssize_t __pyx_t_3; + int __pyx_t_4; + int __pyx_t_5; + PyObject 
*__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0); + + /* "(tree fragment)":12 + * return __pyx_result + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): + * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<< + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): + * __pyx_result.__dict__.update(__pyx_state[1]) + */ + if (unlikely(__pyx_v___pyx_state == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); + __PYX_ERR(1, 12, __pyx_L1_error) + } + __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GIVEREF(__pyx_t_1); + __Pyx_GOTREF(__pyx_v___pyx_result->name); + __Pyx_DECREF(__pyx_v___pyx_result->name); + __pyx_v___pyx_result->name = __pyx_t_1; + __pyx_t_1 = 0; + + /* "(tree fragment)":13 + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): + * __pyx_result.name = __pyx_state[0] + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< + * __pyx_result.__dict__.update(__pyx_state[1]) + */ + if (unlikely(__pyx_v___pyx_state == Py_None)) { + PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); + __PYX_ERR(1, 13, __pyx_L1_error) + } + __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error) + __pyx_t_4 = ((__pyx_t_3 > 1) != 0); + if (__pyx_t_4) { + } else { + __pyx_t_2 = __pyx_t_4; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error) + __pyx_t_5 = (__pyx_t_4 != 0); + __pyx_t_2 = __pyx_t_5; + __pyx_L4_bool_binop_done:; + if (__pyx_t_2) { + + /* "(tree fragment)":14 + * __pyx_result.name = __pyx_state[0] + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): + * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<< + */ + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + if (unlikely(__pyx_v___pyx_state == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); + __PYX_ERR(1, 14, __pyx_L1_error) + } + __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_8 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { + __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); + if (likely(__pyx_t_8)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); + __Pyx_INCREF(__pyx_t_8); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_7, function); + } + } + __pyx_t_1 = (__pyx_t_8) ? 
__Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); + __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "(tree fragment)":13 + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): + * __pyx_result.name = __pyx_state[0] + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< + * __pyx_result.__dict__.update(__pyx_state[1]) + */ + } + + /* "(tree fragment)":11 + * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) + * return __pyx_result + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< + * __pyx_result.name = __pyx_state[0] + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} +static struct __pyx_vtabstruct_array __pyx_vtable_array; + +static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { + struct __pyx_array_obj *p; + PyObject *o; + if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { + o = (*t->tp_alloc)(t, 0); + } else { + o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); + } + if (unlikely(!o)) return 0; + p = ((struct __pyx_array_obj *)o); + p->__pyx_vtab = __pyx_vtabptr_array; + p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); + p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); + if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad; + return o; + bad: + Py_DECREF(o); o = 0; + return NULL; +} + +static void __pyx_tp_dealloc_array(PyObject *o) { + struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; + #if CYTHON_USE_TP_FINALIZE + if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { + if (PyObject_CallFinalizerFromDealloc(o)) return; + } + #endif + { + PyObject *etype, *eval, *etb; + PyErr_Fetch(&etype, &eval, &etb); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); + __pyx_array___dealloc__(o); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); + PyErr_Restore(etype, eval, etb); + } + Py_CLEAR(p->mode); + Py_CLEAR(p->_format); + (*Py_TYPE(o)->tp_free)(o); +} +static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { + PyObject *r; + PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; + r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); + Py_DECREF(x); + return r; +} + +static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { + if (v) { + return __pyx_array___setitem__(o, i, v); + } + else { + PyErr_Format(PyExc_NotImplementedError, + "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); + return -1; + } +} + +static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { + PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n); + if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Clear(); + v = __pyx_array___getattr__(o, n); + } + return v; +} + 
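+
+/* Editorial note, not Cython output: the generated pair __pyx_unpickle_Enum /
+ * __pyx_unpickle_Enum__set_state above implements pickle support for the
+ * module-private Enum helper. A rough Python-level sketch of the same
+ * protocol, illustrative only, using the names from the echoed
+ * "(tree fragment)" comments:
+ *
+ *   def __pyx_unpickle_Enum(type_, checksum, state):
+ *       if checksum != 0xb068931:
+ *           raise __pyx_PickleError(...)          # checksum guards layout
+ *       result = Enum.__new__(type_)              # allocate without __init__
+ *       if state is not None:
+ *           result.name = state[0]                # __set_state: restore name
+ *           if len(state) > 1 and hasattr(result, '__dict__'):
+ *               result.__dict__.update(state[1])  # restore any extra attrs
+ *       return result
+ */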
+static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); +} + +static PyMethodDef __pyx_methods_array[] = { + {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, + {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0}, + {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0}, + {0, 0, 0, 0} +}; + +static struct PyGetSetDef __pyx_getsets_array[] = { + {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, + {0, 0, 0, 0, 0} +}; + +static PySequenceMethods __pyx_tp_as_sequence_array = { + __pyx_array___len__, /*sq_length*/ + 0, /*sq_concat*/ + 0, /*sq_repeat*/ + __pyx_sq_item_array, /*sq_item*/ + 0, /*sq_slice*/ + 0, /*sq_ass_item*/ + 0, /*sq_ass_slice*/ + 0, /*sq_contains*/ + 0, /*sq_inplace_concat*/ + 0, /*sq_inplace_repeat*/ +}; + +static PyMappingMethods __pyx_tp_as_mapping_array = { + __pyx_array___len__, /*mp_length*/ + __pyx_array___getitem__, /*mp_subscript*/ + __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ +}; + +static PyBufferProcs __pyx_tp_as_buffer_array = { + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getreadbuffer*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getwritebuffer*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getsegcount*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getcharbuffer*/ + #endif + __pyx_array_getbuffer, /*bf_getbuffer*/ + 0, /*bf_releasebuffer*/ +}; + +static PyTypeObject __pyx_type___pyx_array = { + PyVarObject_HEAD_INIT(0, 0) + "monotonic_align.core.array", /*tp_name*/ + sizeof(struct __pyx_array_obj), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc_array, /*tp_dealloc*/ + #if PY_VERSION_HEX < 0x030800b4 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 + 0, /*tp_vectorcall_offset*/ + #endif + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + #if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ + #endif + #if PY_MAJOR_VERSION >= 3 + 0, /*tp_as_async*/ + #endif + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ + &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + __pyx_tp_getattro_array, /*tp_getattro*/ + 0, /*tp_setattro*/ + &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ + 0, /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_methods_array, /*tp_methods*/ + 0, /*tp_members*/ + __pyx_getsets_array, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new_array, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ + #if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ + #endif + #if PY_VERSION_HEX >= 0x030800b1 + 0, /*tp_vectorcall*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 + 0, /*tp_print*/ + #endif +}; + +static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { + struct __pyx_MemviewEnum_obj *p; + PyObject *o; + if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { + o = (*t->tp_alloc)(t, 0); + } else { + o 
= (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); + } + if (unlikely(!o)) return 0; + p = ((struct __pyx_MemviewEnum_obj *)o); + p->name = Py_None; Py_INCREF(Py_None); + return o; +} + +static void __pyx_tp_dealloc_Enum(PyObject *o) { + struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; + #if CYTHON_USE_TP_FINALIZE + if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { + if (PyObject_CallFinalizerFromDealloc(o)) return; + } + #endif + PyObject_GC_UnTrack(o); + Py_CLEAR(p->name); + (*Py_TYPE(o)->tp_free)(o); +} + +static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { + int e; + struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; + if (p->name) { + e = (*v)(p->name, a); if (e) return e; + } + return 0; +} + +static int __pyx_tp_clear_Enum(PyObject *o) { + PyObject* tmp; + struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; + tmp = ((PyObject*)p->name); + p->name = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + return 0; +} + +static PyMethodDef __pyx_methods_Enum[] = { + {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0}, + {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0}, + {0, 0, 0, 0} +}; + +static PyTypeObject __pyx_type___pyx_MemviewEnum = { + PyVarObject_HEAD_INIT(0, 0) + "monotonic_align.core.Enum", /*tp_name*/ + sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc_Enum, /*tp_dealloc*/ + #if PY_VERSION_HEX < 0x030800b4 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 + 0, /*tp_vectorcall_offset*/ + #endif + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + #if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ + #endif + #if PY_MAJOR_VERSION >= 3 + 0, /*tp_as_async*/ + #endif + __pyx_MemviewEnum___repr__, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + 0, /*tp_doc*/ + __pyx_tp_traverse_Enum, /*tp_traverse*/ + __pyx_tp_clear_Enum, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_methods_Enum, /*tp_methods*/ + 0, /*tp_members*/ + 0, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + __pyx_MemviewEnum___init__, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new_Enum, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ + #if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ + #endif + #if PY_VERSION_HEX >= 0x030800b1 + 0, /*tp_vectorcall*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 + 0, /*tp_print*/ + #endif +}; +static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; + +static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { + struct __pyx_memoryview_obj *p; + PyObject *o; + if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { + o = (*t->tp_alloc)(t, 0); + } else { + o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); + } + if (unlikely(!o)) return 0; + p = ((struct 
__pyx_memoryview_obj *)o); + p->__pyx_vtab = __pyx_vtabptr_memoryview; + p->obj = Py_None; Py_INCREF(Py_None); + p->_size = Py_None; Py_INCREF(Py_None); + p->_array_interface = Py_None; Py_INCREF(Py_None); + p->view.obj = NULL; + if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; + return o; + bad: + Py_DECREF(o); o = 0; + return NULL; +} + +static void __pyx_tp_dealloc_memoryview(PyObject *o) { + struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; + #if CYTHON_USE_TP_FINALIZE + if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { + if (PyObject_CallFinalizerFromDealloc(o)) return; + } + #endif + PyObject_GC_UnTrack(o); + { + PyObject *etype, *eval, *etb; + PyErr_Fetch(&etype, &eval, &etb); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); + __pyx_memoryview___dealloc__(o); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); + PyErr_Restore(etype, eval, etb); + } + Py_CLEAR(p->obj); + Py_CLEAR(p->_size); + Py_CLEAR(p->_array_interface); + (*Py_TYPE(o)->tp_free)(o); +} + +static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { + int e; + struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; + if (p->obj) { + e = (*v)(p->obj, a); if (e) return e; + } + if (p->_size) { + e = (*v)(p->_size, a); if (e) return e; + } + if (p->_array_interface) { + e = (*v)(p->_array_interface, a); if (e) return e; + } + if (p->view.obj) { + e = (*v)(p->view.obj, a); if (e) return e; + } + return 0; +} + +static int __pyx_tp_clear_memoryview(PyObject *o) { + PyObject* tmp; + struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; + tmp = ((PyObject*)p->obj); + p->obj = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_size); + p->_size = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_array_interface); + p->_array_interface = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + Py_CLEAR(p->view.obj); + return 0; +} +static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { + PyObject *r; + PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; + r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); + Py_DECREF(x); + return r; +} + +static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { + if (v) { + return __pyx_memoryview___setitem__(o, i, v); + } + else { + PyErr_Format(PyExc_NotImplementedError, + "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); + return -1; + } +} + +static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); +} + +static 
PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); +} + +static PyMethodDef __pyx_methods_memoryview[] = { + {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, + {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, + {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, + {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, + {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0}, + {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0}, + {0, 0, 0, 0} +}; + +static struct PyGetSetDef __pyx_getsets_memoryview[] = { + {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, + {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, + {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, + {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, + {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, + {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, + {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, + {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, + {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, + {0, 0, 0, 0, 0} +}; + +static PySequenceMethods __pyx_tp_as_sequence_memoryview = { + __pyx_memoryview___len__, /*sq_length*/ + 0, /*sq_concat*/ + 0, /*sq_repeat*/ + __pyx_sq_item_memoryview, /*sq_item*/ + 0, /*sq_slice*/ + 0, /*sq_ass_item*/ + 0, /*sq_ass_slice*/ + 0, /*sq_contains*/ + 0, /*sq_inplace_concat*/ + 0, /*sq_inplace_repeat*/ +}; + +static PyMappingMethods __pyx_tp_as_mapping_memoryview = { + __pyx_memoryview___len__, /*mp_length*/ + __pyx_memoryview___getitem__, /*mp_subscript*/ + __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ +}; + +static PyBufferProcs __pyx_tp_as_buffer_memoryview = { + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getreadbuffer*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getwritebuffer*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getsegcount*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getcharbuffer*/ + #endif + __pyx_memoryview_getbuffer, /*bf_getbuffer*/ + 0, /*bf_releasebuffer*/ +}; + +static PyTypeObject __pyx_type___pyx_memoryview = { + PyVarObject_HEAD_INIT(0, 0) + "monotonic_align.core.memoryview", /*tp_name*/ + sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ + #if PY_VERSION_HEX < 0x030800b4 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 + 0, /*tp_vectorcall_offset*/ + #endif + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + #if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ + #endif + #if PY_MAJOR_VERSION >= 3 + 0, /*tp_as_async*/ + #endif + __pyx_memoryview___repr__, /*tp_repr*/ + 0, /*tp_as_number*/ + &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ + &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + __pyx_memoryview___str__, 
/*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + 0, /*tp_doc*/ + __pyx_tp_traverse_memoryview, /*tp_traverse*/ + __pyx_tp_clear_memoryview, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_methods_memoryview, /*tp_methods*/ + 0, /*tp_members*/ + __pyx_getsets_memoryview, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new_memoryview, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ + #if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ + #endif + #if PY_VERSION_HEX >= 0x030800b1 + 0, /*tp_vectorcall*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 + 0, /*tp_print*/ + #endif +}; +static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; + +static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { + struct __pyx_memoryviewslice_obj *p; + PyObject *o = __pyx_tp_new_memoryview(t, a, k); + if (unlikely(!o)) return 0; + p = ((struct __pyx_memoryviewslice_obj *)o); + p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; + p->from_object = Py_None; Py_INCREF(Py_None); + p->from_slice.memview = NULL; + return o; +} + +static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { + struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; + #if CYTHON_USE_TP_FINALIZE + if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { + if (PyObject_CallFinalizerFromDealloc(o)) return; + } + #endif + PyObject_GC_UnTrack(o); + { + PyObject *etype, *eval, *etb; + PyErr_Fetch(&etype, &eval, &etb); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); + __pyx_memoryviewslice___dealloc__(o); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); + PyErr_Restore(etype, eval, etb); + } + Py_CLEAR(p->from_object); + PyObject_GC_Track(o); + __pyx_tp_dealloc_memoryview(o); +} + +static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { + int e; + struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; + e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; + if (p->from_object) { + e = (*v)(p->from_object, a); if (e) return e; + } + return 0; +} + +static int __pyx_tp_clear__memoryviewslice(PyObject *o) { + PyObject* tmp; + struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; + __pyx_tp_clear_memoryview(o); + tmp = ((PyObject*)p->from_object); + p->from_object = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + __PYX_XDEC_MEMVIEW(&p->from_slice, 1); + return 0; +} + +static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); +} + +static PyMethodDef __pyx_methods__memoryviewslice[] = { + {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0}, + {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0}, + {0, 0, 0, 0} +}; + +static struct PyGetSetDef 
__pyx_getsets__memoryviewslice[] = { + {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, + {0, 0, 0, 0, 0} +}; + +static PyTypeObject __pyx_type___pyx_memoryviewslice = { + PyVarObject_HEAD_INIT(0, 0) + "monotonic_align.core._memoryviewslice", /*tp_name*/ + sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ + #if PY_VERSION_HEX < 0x030800b4 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 + 0, /*tp_vectorcall_offset*/ + #endif + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + #if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ + #endif + #if PY_MAJOR_VERSION >= 3 + 0, /*tp_as_async*/ + #endif + #if CYTHON_COMPILING_IN_PYPY + __pyx_memoryview___repr__, /*tp_repr*/ + #else + 0, /*tp_repr*/ + #endif + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + #if CYTHON_COMPILING_IN_PYPY + __pyx_memoryview___str__, /*tp_str*/ + #else + 0, /*tp_str*/ + #endif + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + "Internal class for passing memoryview slices to Python", /*tp_doc*/ + __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ + __pyx_tp_clear__memoryviewslice, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_methods__memoryviewslice, /*tp_methods*/ + 0, /*tp_members*/ + __pyx_getsets__memoryviewslice, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new__memoryviewslice, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ + #if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ + #endif + #if PY_VERSION_HEX >= 0x030800b1 + 0, /*tp_vectorcall*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 + 0, /*tp_print*/ + #endif +}; + +static PyMethodDef __pyx_methods[] = { + {"maximum_path_c", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15monotonic_align_4core_1maximum_path_c, METH_VARARGS|METH_KEYWORDS, 0}, + {0, 0, 0, 0} +}; + +#if PY_MAJOR_VERSION >= 3 +#if CYTHON_PEP489_MULTI_PHASE_INIT +static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ +static int __pyx_pymod_exec_core(PyObject* module); /*proto*/ +static PyModuleDef_Slot __pyx_moduledef_slots[] = { + {Py_mod_create, (void*)__pyx_pymod_create}, + {Py_mod_exec, (void*)__pyx_pymod_exec_core}, + {0, NULL} +}; +#endif + +static struct PyModuleDef __pyx_moduledef = { + PyModuleDef_HEAD_INIT, + "core", + 0, /* m_doc */ + #if CYTHON_PEP489_MULTI_PHASE_INIT + 0, /* m_size */ + #else + -1, /* m_size */ + #endif + __pyx_methods /* m_methods */, + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_moduledef_slots, /* m_slots */ + #else + NULL, /* m_reload */ + #endif + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL /* m_free */ +}; +#endif +#ifndef CYTHON_SMALL_CODE +#if defined(__clang__) + #define CYTHON_SMALL_CODE +#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) + #define CYTHON_SMALL_CODE __attribute__((cold)) +#else + #define CYTHON_SMALL_CODE +#endif +#endif + +static __Pyx_StringTabEntry __pyx_string_tab[] = { + {&__pyx_n_s_ASCII, __pyx_k_ASCII, 
sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, + {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, + {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, + {&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0}, + {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0}, + {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, + {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, + {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, + {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0}, + {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, + {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, + {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, + {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, + {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, + {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, + {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, + {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, + {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, + {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, + {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, + {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, + {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, + {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, + {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, + {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, + {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, + {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, + {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, + {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, + {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, + {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, + {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 
1, 1}, + {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, + {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, + {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, + {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, + {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, + {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, + {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, + {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, + {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, + {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, + {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, + {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, + {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, + {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, + {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, + {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, + {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, + {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, + {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, + {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, + {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, + {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, + {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, + {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, + {&__pyx_n_s_paths, __pyx_k_paths, sizeof(__pyx_k_paths), 0, 0, 1, 1}, + {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, + {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, + {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, + {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, + {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, + {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, + {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, + {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, + {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 
1}, + {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, + {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, + {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, + {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, + {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, + {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, + {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, + {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, + {&__pyx_n_s_t_xs, __pyx_k_t_xs, sizeof(__pyx_k_t_xs), 0, 0, 1, 1}, + {&__pyx_n_s_t_ys, __pyx_k_t_ys, sizeof(__pyx_k_t_ys), 0, 0, 1, 1}, + {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, + {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, + {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, + {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, + {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, + {&__pyx_n_s_values, __pyx_k_values, sizeof(__pyx_k_values), 0, 0, 1, 1}, + {0, 0, 0, 0, 0, 0, 0} +}; +static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { + __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 15, __pyx_L1_error) + __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error) + __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error) + __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error) + __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error) + __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 404, __pyx_L1_error) + __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 613, __pyx_L1_error) + __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 832, __pyx_L1_error) + return 0; + __pyx_L1_error:; + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); + + /* "View.MemoryView":133 + * + * if not self.ndim: + * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< + * + * if itemsize <= 0: + */ + __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 133, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__2); + __Pyx_GIVEREF(__pyx_tuple__2); + + /* "View.MemoryView":136 + * + * if itemsize <= 0: + * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< + * + * if not isinstance(format, bytes): + */ + __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 136, __pyx_L1_error) + 
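+ /* Editorial note, not Cython output: each __pyx_tuple__N built in
+  * __Pyx_InitCachedConstants is the pre-built argument tuple for one of
+  * the raise sites echoed in the surrounding comments, so the exception
+  * message objects are created once at import time rather than on every
+  * raise. */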
__Pyx_GOTREF(__pyx_tuple__3); + __Pyx_GIVEREF(__pyx_tuple__3); + + /* "View.MemoryView":148 + * + * if not self._shape: + * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< + * + * + */ + __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 148, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__4); + __Pyx_GIVEREF(__pyx_tuple__4); + + /* "View.MemoryView":176 + * self.data = malloc(self.len) + * if not self.data: + * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< + * + * if self.dtype_is_object: + */ + __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 176, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__5); + __Pyx_GIVEREF(__pyx_tuple__5); + + /* "View.MemoryView":192 + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * if not (flags & bufmode): + * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< + * info.buf = self.data + * info.len = self.len + */ + __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 192, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__6); + __Pyx_GIVEREF(__pyx_tuple__6); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__7); + __Pyx_GIVEREF(__pyx_tuple__7); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__8); + __Pyx_GIVEREF(__pyx_tuple__8); + + /* "View.MemoryView":418 + * def __setitem__(memoryview self, object index, object value): + * if self.view.readonly: + * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< + * + * have_slices, index = _unellipsify(index, self.view.ndim) + */ + __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 418, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__9); + __Pyx_GIVEREF(__pyx_tuple__9); + + /* "View.MemoryView":495 + * result = struct.unpack(self.view.format, bytesitem) + * except struct.error: + * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< + * else: + * if len(self.view.format) == 1: + */ + __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 495, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__10); + __Pyx_GIVEREF(__pyx_tuple__10); + + /* "View.MemoryView":520 + * def __getbuffer__(self, Py_buffer *info, int flags): + * if flags & PyBUF_WRITABLE and self.view.readonly: + * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< + * + * if flags & PyBUF_ND: + */ + __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if 
(unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 520, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__11); + __Pyx_GIVEREF(__pyx_tuple__11); + + /* "View.MemoryView":570 + * if self.view.strides == NULL: + * + * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< + * + * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) + */ + __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 570, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__12); + __Pyx_GIVEREF(__pyx_tuple__12); + + /* "View.MemoryView":577 + * def suboffsets(self): + * if self.view.suboffsets == NULL: + * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< + * + * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) + */ + __pyx_tuple__13 = PyTuple_New(1); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 577, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__13); + __Pyx_INCREF(__pyx_int_neg_1); + __Pyx_GIVEREF(__pyx_int_neg_1); + PyTuple_SET_ITEM(__pyx_tuple__13, 0, __pyx_int_neg_1); + __Pyx_GIVEREF(__pyx_tuple__13); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__14); + __Pyx_GIVEREF(__pyx_tuple__14); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__15); + __Pyx_GIVEREF(__pyx_tuple__15); + + /* "View.MemoryView":682 + * if item is Ellipsis: + * if not seen_ellipsis: + * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< + * seen_ellipsis = True + * else: + */ + __pyx_slice__16 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__16)) __PYX_ERR(1, 682, __pyx_L1_error) + __Pyx_GOTREF(__pyx_slice__16); + __Pyx_GIVEREF(__pyx_slice__16); + + /* "View.MemoryView":703 + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: + * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< + * + * + */ + __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 703, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__17); + __Pyx_GIVEREF(__pyx_tuple__17); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__18); + __Pyx_GIVEREF(__pyx_tuple__18); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # 
<<<<<<<<<<<<<< + */ + __pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__19); + __Pyx_GIVEREF(__pyx_tuple__19); + + /* "View.MemoryView":286 + * return self.name + * + * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< + * cdef strided = Enum("<strided and direct>") # default + * cdef indirect = Enum("<strided and indirect>") + */ + __pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(1, 286, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__20); + __Pyx_GIVEREF(__pyx_tuple__20); + + /* "View.MemoryView":287 + * + * cdef generic = Enum("<strided and direct or indirect>") + * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< + * cdef indirect = Enum("<strided and indirect>") + * + */ + __pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(1, 287, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__21); + __Pyx_GIVEREF(__pyx_tuple__21); + + /* "View.MemoryView":288 + * cdef generic = Enum("<strided and direct or indirect>") + * cdef strided = Enum("<strided and direct>") # default + * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< + * + * + */ + __pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 288, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__22); + __Pyx_GIVEREF(__pyx_tuple__22); + + /* "View.MemoryView":291 + * + * + * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< + * cdef indirect_contiguous = Enum("<contiguous and indirect>") + * + */ + __pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 291, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__23); + __Pyx_GIVEREF(__pyx_tuple__23); + + /* "View.MemoryView":292 + * + * cdef contiguous = Enum("<contiguous and direct>") + * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< + * + * + */ + __pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 292, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__24); + __Pyx_GIVEREF(__pyx_tuple__24); + + /* "(tree fragment)":1 + * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< + * cdef object __pyx_PickleError + * cdef object __pyx_result + */ + __pyx_tuple__25 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__25); + __Pyx_GIVEREF(__pyx_tuple__25); + __pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) __PYX_ERR(1, 1, __pyx_L1_error) + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { + /* InitThreads.init */ + #ifdef WITH_THREAD +PyEval_InitThreads(); +#endif + +if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) + + if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_neg_1 = 
PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) + return 0; + __pyx_L1_error:; + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ + +static int __Pyx_modinit_global_init_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); + /*--- Global init code ---*/ + generic = Py_None; Py_INCREF(Py_None); + strided = Py_None; Py_INCREF(Py_None); + indirect = Py_None; Py_INCREF(Py_None); + contiguous = Py_None; Py_INCREF(Py_None); + indirect_contiguous = Py_None; Py_INCREF(Py_None); + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_variable_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); + /*--- Variable export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); + /*--- Function export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_type_init_code(void) { + __Pyx_RefNannyDeclarations + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); + /*--- Type init code ---*/ + __pyx_vtabptr_array = &__pyx_vtable_array; + __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; + if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) + #if PY_VERSION_HEX < 0x030800B1 + __pyx_type___pyx_array.tp_print = 0; + #endif + if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) + if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) + __pyx_array_type = &__pyx_type___pyx_array; + if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) + #if PY_VERSION_HEX < 0x030800B1 + __pyx_type___pyx_MemviewEnum.tp_print = 0; + #endif + if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) { + __pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr; + } + if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) + __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; + __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; + __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; + __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; + __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; + 
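+ /* Editorial note, not Cython output: the __pyx_vtable_* structs being
+  * filled in here are plain C function-pointer tables; cdef-level calls
+  * between the generated memoryview types dispatch through these pointers
+  * directly instead of going through Python attribute lookup. */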
__pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; + __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; + __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; + __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; + if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) + #if PY_VERSION_HEX < 0x030800B1 + __pyx_type___pyx_memoryview.tp_print = 0; + #endif + if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) { + __pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr; + } + if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) + if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) + __pyx_memoryview_type = &__pyx_type___pyx_memoryview; + __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; + __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; + __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; + __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; + __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; + if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) + #if PY_VERSION_HEX < 0x030800B1 + __pyx_type___pyx_memoryviewslice.tp_print = 0; + #endif + if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) { + __pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr; + } + if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) + if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) + __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} + +static int __Pyx_modinit_type_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); + /*--- Type import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_variable_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); + /*--- Variable import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); + /*--- Function import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + + +#ifndef 
CYTHON_NO_PYINIT_EXPORT +#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC +#elif PY_MAJOR_VERSION < 3 +#ifdef __cplusplus +#define __Pyx_PyMODINIT_FUNC extern "C" void +#else +#define __Pyx_PyMODINIT_FUNC void +#endif +#else +#ifdef __cplusplus +#define __Pyx_PyMODINIT_FUNC extern "C" PyObject * +#else +#define __Pyx_PyMODINIT_FUNC PyObject * +#endif +#endif + + +#if PY_MAJOR_VERSION < 3 +__Pyx_PyMODINIT_FUNC initcore(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC initcore(void) +#else +__Pyx_PyMODINIT_FUNC PyInit_core(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC PyInit_core(void) +#if CYTHON_PEP489_MULTI_PHASE_INIT +{ + return PyModuleDef_Init(&__pyx_moduledef); +} +static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { + #if PY_VERSION_HEX >= 0x030700A1 + static PY_INT64_T main_interpreter_id = -1; + PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); + if (main_interpreter_id == -1) { + main_interpreter_id = current_id; + return (unlikely(current_id == -1)) ? -1 : 0; + } else if (unlikely(main_interpreter_id != current_id)) + #else + static PyInterpreterState *main_interpreter = NULL; + PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; + if (!main_interpreter) { + main_interpreter = current_interpreter; + } else if (unlikely(main_interpreter != current_interpreter)) + #endif + { + PyErr_SetString( + PyExc_ImportError, + "Interpreter change detected - this module can only be loaded into one interpreter per process."); + return -1; + } + return 0; +} +static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { + PyObject *value = PyObject_GetAttrString(spec, from_name); + int result = 0; + if (likely(value)) { + if (allow_none || value != Py_None) { + result = PyDict_SetItemString(moddict, to_name, value); + } + Py_DECREF(value); + } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Clear(); + } else { + result = -1; + } + return result; +} +static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { + PyObject *module = NULL, *moddict, *modname; + if (__Pyx_check_single_interpreter()) + return NULL; + if (__pyx_m) + return __Pyx_NewRef(__pyx_m); + modname = PyObject_GetAttrString(spec, "name"); + if (unlikely(!modname)) goto bad; + module = PyModule_NewObject(modname); + Py_DECREF(modname); + if (unlikely(!module)) goto bad; + moddict = PyModule_GetDict(module); + if (unlikely(!moddict)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; + return module; +bad: + Py_XDECREF(module); + return NULL; +} + + +static CYTHON_SMALL_CODE int __pyx_pymod_exec_core(PyObject *__pyx_pyinit_module) +#endif +#endif +{ + PyObject *__pyx_t_1 = NULL; + static PyThread_type_lock __pyx_t_2[8]; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannyDeclarations + #if CYTHON_PEP489_MULTI_PHASE_INIT + if (__pyx_m) { + if (__pyx_m == __pyx_pyinit_module) return 0; + PyErr_SetString(PyExc_RuntimeError, "Module 'core' has already been imported. 
Re-initialisation is not supported."); + return -1; + } + #elif PY_MAJOR_VERSION >= 3 + if (__pyx_m) return __Pyx_NewRef(__pyx_m); + #endif + #if CYTHON_REFNANNY +__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); +if (!__Pyx_RefNanny) { + PyErr_Clear(); + __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); + if (!__Pyx_RefNanny) + Py_FatalError("failed to import 'refnanny' module"); +} +#endif + __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_core(void)", 0); + if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #ifdef __Pxy_PyFrame_Initialize_Offsets + __Pxy_PyFrame_Initialize_Offsets(); + #endif + __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) + #ifdef __Pyx_CyFunction_USED + if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_FusedFunction_USED + if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Coroutine_USED + if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Generator_USED + if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_AsyncGen_USED + if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_StopAsyncIteration_USED + if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + /*--- Library function declarations ---*/ + /*--- Threads initialization code ---*/ + #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS + #ifdef WITH_THREAD /* Python build with threading support? */ + PyEval_InitThreads(); + #endif + #endif + /*--- Module creation code ---*/ + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_m = __pyx_pyinit_module; + Py_INCREF(__pyx_m); + #else + #if PY_MAJOR_VERSION < 3 + __pyx_m = Py_InitModule4("core", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); + #else + __pyx_m = PyModule_Create(&__pyx_moduledef); + #endif + if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_d); + __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_b); + __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_cython_runtime); + if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + /*--- Initialize various global constants etc. 
---*/
+  if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+  #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
+  if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+  #endif
+  if (__pyx_module_is_main_monotonic_align__core) {
+    if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+  }
+  #if PY_MAJOR_VERSION >= 3
+  {
+    PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
+    if (!PyDict_GetItemString(modules, "monotonic_align.core")) {
+      if (unlikely(PyDict_SetItemString(modules, "monotonic_align.core", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
+    }
+  }
+  #endif
+  /*--- Builtin init code ---*/
+  if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+  /*--- Constants init code ---*/
+  if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+  /*--- Global type/function init code ---*/
+  (void)__Pyx_modinit_global_init_code();
+  (void)__Pyx_modinit_variable_export_code();
+  (void)__Pyx_modinit_function_export_code();
+  if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
+  (void)__Pyx_modinit_type_import_code();
+  (void)__Pyx_modinit_variable_import_code();
+  (void)__Pyx_modinit_function_import_code();
+  /*--- Execution code ---*/
+  #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
+  if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+  #endif
+
+  /* "monotonic_align/core.pyx":7
+ * @cython.boundscheck(False)
+ * @cython.wraparound(False)
+ * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<<
+ * cdef int x
+ * cdef int y
+ */
+  __pyx_k_ = (-1e9);
+
+  /* "monotonic_align/core.pyx":1
+ * cimport cython # <<<<<<<<<<<<<<
+ * from cython.parallel import prange
+ *
+ */
+  __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
+  __Pyx_GOTREF(__pyx_t_1);
+  if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+  /* "View.MemoryView":209
+ * info.obj = self
+ *
+ * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
+ *
+ * def __dealloc__(array self):
+ */
+  __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 209, __pyx_L1_error)
+  __Pyx_GOTREF(__pyx_t_1);
+  if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 209, __pyx_L1_error)
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  PyType_Modified(__pyx_array_type);
+
+  /* "View.MemoryView":286
+ * return self.name
+ *
+ * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
+ * cdef strided = Enum("<strided and direct>") # default
+ * cdef indirect = Enum("<strided and indirect>")
+ */
+  __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 286, __pyx_L1_error)
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_XGOTREF(generic);
+  __Pyx_DECREF_SET(generic, __pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_1);
+  __pyx_t_1 = 0;
+
+  /* "View.MemoryView":287
+ *
+ * cdef generic = Enum("<strided and direct or indirect>")
+ * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
+ * cdef indirect = Enum("<strided and indirect>")
+ *
+ */
+  __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 287, __pyx_L1_error)
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_XGOTREF(strided);
+  __Pyx_DECREF_SET(strided, __pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_1);
+  __pyx_t_1 = 0;
+
+  /* "View.MemoryView":288
+ * cdef generic = Enum("<strided and direct or indirect>")
+ * cdef strided = Enum("<strided and direct>") # default
+ * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
+ *
+ *
+ */
+  __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 288, __pyx_L1_error)
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_XGOTREF(indirect);
+  __Pyx_DECREF_SET(indirect, __pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_1);
+  __pyx_t_1 = 0;
+
+  /* "View.MemoryView":291
+ *
+ *
+ * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
+ * cdef indirect_contiguous = Enum("<contiguous and indirect>")
+ *
+ */
+  __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 291, __pyx_L1_error)
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_XGOTREF(contiguous);
+  __Pyx_DECREF_SET(contiguous, __pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_1);
+  __pyx_t_1 = 0;
+
+  /* "View.MemoryView":292
+ *
+ * cdef contiguous = Enum("<contiguous and direct>")
+ * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
+ *
+ *
+ */
+  __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 292, __pyx_L1_error)
+  __Pyx_GOTREF(__pyx_t_1);
+  __Pyx_XGOTREF(indirect_contiguous);
+  __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1);
+  __Pyx_GIVEREF(__pyx_t_1);
+  __pyx_t_1 = 0;
+
+  /* "View.MemoryView":316
+ *
+ * DEF THREAD_LOCKS_PREALLOCATED = 8
+ * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<<
+ * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [
+ * PyThread_allocate_lock(),
+ */
+  __pyx_memoryview_thread_locks_used = 0;
+
+  /* "View.MemoryView":317
+ * DEF THREAD_LOCKS_PREALLOCATED = 8
+ * cdef int __pyx_memoryview_thread_locks_used = 0
+ * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<<
+ * PyThread_allocate_lock(),
+ * PyThread_allocate_lock(),
+ */
+  __pyx_t_2[0] = PyThread_allocate_lock();
+  __pyx_t_2[1] = PyThread_allocate_lock();
+  __pyx_t_2[2] = PyThread_allocate_lock();
+  __pyx_t_2[3] = PyThread_allocate_lock();
+  __pyx_t_2[4] = PyThread_allocate_lock();
+  __pyx_t_2[5] = PyThread_allocate_lock();
+  __pyx_t_2[6] = PyThread_allocate_lock();
+  __pyx_t_2[7] = PyThread_allocate_lock();
+  memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_2, sizeof(__pyx_memoryview_thread_locks[0]) * (8));
+
+  /* "View.MemoryView":549
+ * info.obj = self
+ *
+ * __pyx_getbuffer = capsule( &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
+ *
+ *
+ */
+  __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 549, __pyx_L1_error)
+  __Pyx_GOTREF(__pyx_t_1);
+  if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 549, __pyx_L1_error)
+  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+  PyType_Modified(__pyx_memoryview_type);
+
+  /* "View.MemoryView":995
+ * return self.from_object
+ *
+ * __pyx_getbuffer = capsule( &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
+ *
+ *
+ */
+  __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 995, __pyx_L1_error)
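+  /* Same registration pattern as for the array and memoryview types above: the C getbuffer implementation is exposed through a "__pyx_getbuffer" capsule in the type dict of the memoryview-slice type, and PyType_Modified() is then called so the buffer-protocol change is picked up. */ +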
__Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 995, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + PyType_Modified(__pyx_memoryviewslice_type); + + /* "(tree fragment)":1 + * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< + * cdef object __pyx_PickleError + * cdef object __pyx_result + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "(tree fragment)":11 + * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) + * return __pyx_result + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< + * __pyx_result.name = __pyx_state[0] + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): + */ + + /*--- Wrapped vars code ---*/ + + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + if (__pyx_m) { + if (__pyx_d) { + __Pyx_AddTraceback("init monotonic_align.core", __pyx_clineno, __pyx_lineno, __pyx_filename); + } + Py_CLEAR(__pyx_m); + } else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_ImportError, "init monotonic_align.core"); + } + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + #if CYTHON_PEP489_MULTI_PHASE_INIT + return (__pyx_m != NULL) ? 0 : -1; + #elif PY_MAJOR_VERSION >= 3 + return __pyx_m; + #else + return; + #endif +} + +/* --- Runtime support code --- */ +/* Refnanny */ +#if CYTHON_REFNANNY +static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { + PyObject *m = NULL, *p = NULL; + void *r = NULL; + m = PyImport_ImportModule(modname); + if (!m) goto end; + p = PyObject_GetAttrString(m, "RefNannyAPI"); + if (!p) goto end; + r = PyLong_AsVoidPtr(p); +end: + Py_XDECREF(p); + Py_XDECREF(m); + return (__Pyx_RefNannyAPIStruct *)r; +} +#endif + +/* PyObjectGetAttrStr */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro)) + return tp->tp_getattro(obj, attr_name); +#if PY_MAJOR_VERSION < 3 + if (likely(tp->tp_getattr)) + return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); +#endif + return PyObject_GetAttr(obj, attr_name); +} +#endif + +/* GetBuiltinName */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name) { + PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); + if (unlikely(!result)) { + PyErr_Format(PyExc_NameError, +#if PY_MAJOR_VERSION >= 3 + "name '%U' is not defined", name); +#else + "name '%.200s' is not defined", PyString_AS_STRING(name)); +#endif + } + return result; +} + +/* MemviewSliceInit */ +static int +__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, + int ndim, + __Pyx_memviewslice *memviewslice, + int memview_is_new_reference) +{ + __Pyx_RefNannyDeclarations + int i, retval=-1; + Py_buffer *buf = &memview->view; + __Pyx_RefNannySetupContext("init_memviewslice", 0); + if (unlikely(memviewslice->memview || memviewslice->data)) { + PyErr_SetString(PyExc_ValueError, + "memviewslice is already initialized!"); + goto fail; + } + if (buf->strides) { + for (i = 0; i < ndim; i++) { + memviewslice->strides[i] = buf->strides[i]; + } + } else { + Py_ssize_t stride = 
buf->itemsize; + for (i = ndim - 1; i >= 0; i--) { + memviewslice->strides[i] = stride; + stride *= buf->shape[i]; + } + } + for (i = 0; i < ndim; i++) { + memviewslice->shape[i] = buf->shape[i]; + if (buf->suboffsets) { + memviewslice->suboffsets[i] = buf->suboffsets[i]; + } else { + memviewslice->suboffsets[i] = -1; + } + } + memviewslice->memview = memview; + memviewslice->data = (char *)buf->buf; + if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { + Py_INCREF(memview); + } + retval = 0; + goto no_fail; +fail: + memviewslice->memview = 0; + memviewslice->data = 0; + retval = -1; +no_fail: + __Pyx_RefNannyFinishContext(); + return retval; +} +#ifndef Py_NO_RETURN +#define Py_NO_RETURN +#endif +static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN { + va_list vargs; + char msg[200]; +#ifdef HAVE_STDARG_PROTOTYPES + va_start(vargs, fmt); +#else + va_start(vargs); +#endif + vsnprintf(msg, 200, fmt, vargs); + va_end(vargs); + Py_FatalError(msg); +} +static CYTHON_INLINE int +__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, + PyThread_type_lock lock) +{ + int result; + PyThread_acquire_lock(lock, 1); + result = (*acquisition_count)++; + PyThread_release_lock(lock); + return result; +} +static CYTHON_INLINE int +__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, + PyThread_type_lock lock) +{ + int result; + PyThread_acquire_lock(lock, 1); + result = (*acquisition_count)--; + PyThread_release_lock(lock); + return result; +} +static CYTHON_INLINE void +__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) +{ + int first_time; + struct __pyx_memoryview_obj *memview = memslice->memview; + if (unlikely(!memview || (PyObject *) memview == Py_None)) + return; + if (unlikely(__pyx_get_slice_count(memview) < 0)) + __pyx_fatalerror("Acquisition count is %d (line %d)", + __pyx_get_slice_count(memview), lineno); + first_time = __pyx_add_acquisition_count(memview) == 0; + if (unlikely(first_time)) { + if (have_gil) { + Py_INCREF((PyObject *) memview); + } else { + PyGILState_STATE _gilstate = PyGILState_Ensure(); + Py_INCREF((PyObject *) memview); + PyGILState_Release(_gilstate); + } + } +} +static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, + int have_gil, int lineno) { + int last_time; + struct __pyx_memoryview_obj *memview = memslice->memview; + if (unlikely(!memview || (PyObject *) memview == Py_None)) { + memslice->memview = NULL; + return; + } + if (unlikely(__pyx_get_slice_count(memview) <= 0)) + __pyx_fatalerror("Acquisition count is %d (line %d)", + __pyx_get_slice_count(memview), lineno); + last_time = __pyx_sub_acquisition_count(memview) == 1; + memslice->data = NULL; + if (unlikely(last_time)) { + if (have_gil) { + Py_CLEAR(memslice->memview); + } else { + PyGILState_STATE _gilstate = PyGILState_Ensure(); + Py_CLEAR(memslice->memview); + PyGILState_Release(_gilstate); + } + } else { + memslice->memview = NULL; + } +} + +/* RaiseArgTupleInvalid */ +static void __Pyx_RaiseArgtupleInvalid( + const char* func_name, + int exact, + Py_ssize_t num_min, + Py_ssize_t num_max, + Py_ssize_t num_found) +{ + Py_ssize_t num_expected; + const char *more_or_less; + if (num_found < num_min) { + num_expected = num_min; + more_or_less = "at least"; + } else { + num_expected = num_max; + more_or_less = "at most"; + } + if (exact) { + more_or_less = "exactly"; + } + PyErr_Format(PyExc_TypeError, + "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" 
CYTHON_FORMAT_SSIZE_T "d given)", + func_name, more_or_less, num_expected, + (num_expected == 1) ? "" : "s", num_found); +} + +/* RaiseDoubleKeywords */ +static void __Pyx_RaiseDoubleKeywordsError( + const char* func_name, + PyObject* kw_name) +{ + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION >= 3 + "%s() got multiple values for keyword argument '%U'", func_name, kw_name); + #else + "%s() got multiple values for keyword argument '%s'", func_name, + PyString_AsString(kw_name)); + #endif +} + +/* ParseKeywords */ +static int __Pyx_ParseOptionalKeywords( + PyObject *kwds, + PyObject **argnames[], + PyObject *kwds2, + PyObject *values[], + Py_ssize_t num_pos_args, + const char* function_name) +{ + PyObject *key = 0, *value = 0; + Py_ssize_t pos = 0; + PyObject*** name; + PyObject*** first_kw_arg = argnames + num_pos_args; + while (PyDict_Next(kwds, &pos, &key, &value)) { + name = first_kw_arg; + while (*name && (**name != key)) name++; + if (*name) { + values[name-argnames] = value; + continue; + } + name = first_kw_arg; + #if PY_MAJOR_VERSION < 3 + if (likely(PyString_Check(key))) { + while (*name) { + if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) + && _PyString_Eq(**name, key)) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + if ((**argname == key) || ( + (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) + && _PyString_Eq(**argname, key))) { + goto arg_passed_twice; + } + argname++; + } + } + } else + #endif + if (likely(PyUnicode_Check(key))) { + while (*name) { + int cmp = (**name == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : + #endif + PyUnicode_Compare(**name, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + int cmp = (**argname == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : + #endif + PyUnicode_Compare(**argname, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) goto arg_passed_twice; + argname++; + } + } + } else + goto invalid_keyword_type; + if (kwds2) { + if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; + } else { + goto invalid_keyword; + } + } + return 0; +arg_passed_twice: + __Pyx_RaiseDoubleKeywordsError(function_name, key); + goto bad; +invalid_keyword_type: + PyErr_Format(PyExc_TypeError, + "%.200s() keywords must be strings", function_name); + goto bad; +invalid_keyword: + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION < 3 + "%.200s() got an unexpected keyword argument '%.200s'", + function_name, PyString_AsString(key)); + #else + "%s() got an unexpected keyword argument '%U'", + function_name, key); + #endif +bad: + return -1; +} + +/* None */ +static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { + PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); +} + +/* ArgTypeTest */ +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) +{ + if (unlikely(!type)) { + PyErr_SetString(PyExc_SystemError, "Missing type object"); + return 0; + } + else if (exact) { + #if PY_MAJOR_VERSION == 2 + if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; + #endif + } + else { + if (likely(__Pyx_TypeCheck(obj, type))) return 1; + } + PyErr_Format(PyExc_TypeError, + "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", + name, type->tp_name, Py_TYPE(obj)->tp_name); + return 0; +} + +/* PyObjectCall */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { + PyObject *result; + ternaryfunc call = func->ob_type->tp_call; + if (unlikely(!call)) + return PyObject_Call(func, arg, kw); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + result = (*call)(func, arg, kw); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyErrFetchRestore */ +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + tmp_type = tstate->curexc_type; + tmp_value = tstate->curexc_value; + tmp_tb = tstate->curexc_traceback; + tstate->curexc_type = type; + tstate->curexc_value = value; + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +} +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + *type = tstate->curexc_type; + *value = tstate->curexc_value; + *tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +} +#endif + +/* RaiseException */ +#if PY_MAJOR_VERSION < 3 +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, + CYTHON_UNUSED PyObject *cause) { + __Pyx_PyThreadState_declare + Py_XINCREF(type); + if (!value || value == Py_None) + value = NULL; + else + Py_INCREF(value); + if (!tb || tb == Py_None) + tb = NULL; + else { + Py_INCREF(tb); + if (!PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto raise_error; 
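+            /* raise_error below releases the references acquired on type, value and tb, then returns with the TypeError set */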
+ } + } + if (PyType_Check(type)) { +#if CYTHON_COMPILING_IN_PYPY + if (!value) { + Py_INCREF(Py_None); + value = Py_None; + } +#endif + PyErr_NormalizeException(&type, &value, &tb); + } else { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto raise_error; + } + value = type; + type = (PyObject*) Py_TYPE(type); + Py_INCREF(type); + if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto raise_error; + } + } + __Pyx_PyThreadState_assign + __Pyx_ErrRestore(type, value, tb); + return; +raise_error: + Py_XDECREF(value); + Py_XDECREF(type); + Py_XDECREF(tb); + return; +} +#else +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { + PyObject* owned_instance = NULL; + if (tb == Py_None) { + tb = 0; + } else if (tb && !PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto bad; + } + if (value == Py_None) + value = 0; + if (PyExceptionInstance_Check(type)) { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto bad; + } + value = type; + type = (PyObject*) Py_TYPE(value); + } else if (PyExceptionClass_Check(type)) { + PyObject *instance_class = NULL; + if (value && PyExceptionInstance_Check(value)) { + instance_class = (PyObject*) Py_TYPE(value); + if (instance_class != type) { + int is_subclass = PyObject_IsSubclass(instance_class, type); + if (!is_subclass) { + instance_class = NULL; + } else if (unlikely(is_subclass == -1)) { + goto bad; + } else { + type = instance_class; + } + } + } + if (!instance_class) { + PyObject *args; + if (!value) + args = PyTuple_New(0); + else if (PyTuple_Check(value)) { + Py_INCREF(value); + args = value; + } else + args = PyTuple_Pack(1, value); + if (!args) + goto bad; + owned_instance = PyObject_Call(type, args, NULL); + Py_DECREF(args); + if (!owned_instance) + goto bad; + value = owned_instance; + if (!PyExceptionInstance_Check(value)) { + PyErr_Format(PyExc_TypeError, + "calling %R should have returned an instance of " + "BaseException, not %R", + type, Py_TYPE(value)); + goto bad; + } + } + } else { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto bad; + } + if (cause) { + PyObject *fixed_cause; + if (cause == Py_None) { + fixed_cause = NULL; + } else if (PyExceptionClass_Check(cause)) { + fixed_cause = PyObject_CallObject(cause, NULL); + if (fixed_cause == NULL) + goto bad; + } else if (PyExceptionInstance_Check(cause)) { + fixed_cause = cause; + Py_INCREF(fixed_cause); + } else { + PyErr_SetString(PyExc_TypeError, + "exception causes must derive from " + "BaseException"); + goto bad; + } + PyException_SetCause(value, fixed_cause); + } + PyErr_SetObject(type, value); + if (tb) { +#if CYTHON_COMPILING_IN_PYPY + PyObject *tmp_type, *tmp_value, *tmp_tb; + PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); + Py_INCREF(tb); + PyErr_Restore(tmp_type, tmp_value, tb); + Py_XDECREF(tmp_tb); +#else + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject* tmp_tb = tstate->curexc_traceback; + if (tb != tmp_tb) { + Py_INCREF(tb); + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_tb); + } +#endif + } +bad: + Py_XDECREF(owned_instance); + return; +} +#endif + +/* PyCFunctionFastCall */ +#if CYTHON_FAST_PYCCALL +static CYTHON_INLINE PyObject * 
__Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { + PyCFunctionObject *func = (PyCFunctionObject*)func_obj; + PyCFunction meth = PyCFunction_GET_FUNCTION(func); + PyObject *self = PyCFunction_GET_SELF(func); + int flags = PyCFunction_GET_FLAGS(func); + assert(PyCFunction_Check(func)); + assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); + assert(nargs >= 0); + assert(nargs == 0 || args != NULL); + /* _PyCFunction_FastCallDict() must not be called with an exception set, + because it may clear it (directly or indirectly) and so the + caller loses its exception */ + assert(!PyErr_Occurred()); + if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { + return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); + } else { + return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); + } +} +#endif + +/* PyFunctionFastCall */ +#if CYTHON_FAST_PYCALL +static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, + PyObject *globals) { + PyFrameObject *f; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject **fastlocals; + Py_ssize_t i; + PyObject *result; + assert(globals != NULL); + /* XXX Perhaps we should create a specialized + PyFrame_New() that doesn't take locals, but does + take builtins without sanity checking them. + */ + assert(tstate != NULL); + f = PyFrame_New(tstate, co, globals, NULL); + if (f == NULL) { + return NULL; + } + fastlocals = __Pyx_PyFrame_GetLocalsplus(f); + for (i = 0; i < na; i++) { + Py_INCREF(*args); + fastlocals[i] = *args++; + } + result = PyEval_EvalFrameEx(f,0); + ++tstate->recursion_depth; + Py_DECREF(f); + --tstate->recursion_depth; + return result; +} +#if 1 || PY_VERSION_HEX < 0x030600B1 +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { + PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); + PyObject *globals = PyFunction_GET_GLOBALS(func); + PyObject *argdefs = PyFunction_GET_DEFAULTS(func); + PyObject *closure; +#if PY_MAJOR_VERSION >= 3 + PyObject *kwdefs; +#endif + PyObject *kwtuple, **k; + PyObject **d; + Py_ssize_t nd; + Py_ssize_t nk; + PyObject *result; + assert(kwargs == NULL || PyDict_Check(kwargs)); + nk = kwargs ? 
PyDict_Size(kwargs) : 0; + if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { + return NULL; + } + if ( +#if PY_MAJOR_VERSION >= 3 + co->co_kwonlyargcount == 0 && +#endif + likely(kwargs == NULL || nk == 0) && + co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { + if (argdefs == NULL && co->co_argcount == nargs) { + result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); + goto done; + } + else if (nargs == 0 && argdefs != NULL + && co->co_argcount == Py_SIZE(argdefs)) { + /* function called with no arguments, but all parameters have + a default value: use default values as arguments .*/ + args = &PyTuple_GET_ITEM(argdefs, 0); + result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); + goto done; + } + } + if (kwargs != NULL) { + Py_ssize_t pos, i; + kwtuple = PyTuple_New(2 * nk); + if (kwtuple == NULL) { + result = NULL; + goto done; + } + k = &PyTuple_GET_ITEM(kwtuple, 0); + pos = i = 0; + while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { + Py_INCREF(k[i]); + Py_INCREF(k[i+1]); + i += 2; + } + nk = i / 2; + } + else { + kwtuple = NULL; + k = NULL; + } + closure = PyFunction_GET_CLOSURE(func); +#if PY_MAJOR_VERSION >= 3 + kwdefs = PyFunction_GET_KW_DEFAULTS(func); +#endif + if (argdefs != NULL) { + d = &PyTuple_GET_ITEM(argdefs, 0); + nd = Py_SIZE(argdefs); + } + else { + d = NULL; + nd = 0; + } +#if PY_MAJOR_VERSION >= 3 + result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, + args, (int)nargs, + k, (int)nk, + d, (int)nd, kwdefs, closure); +#else + result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, + args, (int)nargs, + k, (int)nk, + d, (int)nd, closure); +#endif + Py_XDECREF(kwtuple); +done: + Py_LeaveRecursiveCall(); + return result; +} +#endif +#endif + +/* PyObjectCall2Args */ +static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { + PyObject *args, *result = NULL; + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(function)) { + PyObject *args[2] = {arg1, arg2}; + return __Pyx_PyFunction_FastCall(function, args, 2); + } + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(function)) { + PyObject *args[2] = {arg1, arg2}; + return __Pyx_PyCFunction_FastCall(function, args, 2); + } + #endif + args = PyTuple_New(2); + if (unlikely(!args)) goto done; + Py_INCREF(arg1); + PyTuple_SET_ITEM(args, 0, arg1); + Py_INCREF(arg2); + PyTuple_SET_ITEM(args, 1, arg2); + Py_INCREF(function); + result = __Pyx_PyObject_Call(function, args, NULL); + Py_DECREF(args); + Py_DECREF(function); +done: + return result; +} + +/* PyObjectCallMethO */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { + PyObject *self, *result; + PyCFunction cfunc; + cfunc = PyCFunction_GET_FUNCTION(func); + self = PyCFunction_GET_SELF(func); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + result = cfunc(self, arg); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyObjectCallOneArg */ +#if CYTHON_COMPILING_IN_CPYTHON +static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { + PyObject *result; + PyObject *args = PyTuple_New(1); + if (unlikely(!args)) return NULL; + Py_INCREF(arg); + PyTuple_SET_ITEM(args, 0, arg); + result = __Pyx_PyObject_Call(func, args, NULL); + Py_DECREF(args); 
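+    /* dropping the temporary 1-tuple also releases the extra reference taken on arg above, leaving the caller's reference intact */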
+ return result; +} +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { +#if CYTHON_FAST_PYCALL + if (PyFunction_Check(func)) { + return __Pyx_PyFunction_FastCall(func, &arg, 1); + } +#endif + if (likely(PyCFunction_Check(func))) { + if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { + return __Pyx_PyObject_CallMethO(func, arg); +#if CYTHON_FAST_PYCCALL + } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { + return __Pyx_PyCFunction_FastCall(func, &arg, 1); +#endif + } + } + return __Pyx__PyObject_CallOneArg(func, arg); +} +#else +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { + PyObject *result; + PyObject *args = PyTuple_Pack(1, arg); + if (unlikely(!args)) return NULL; + result = __Pyx_PyObject_Call(func, args, NULL); + Py_DECREF(args); + return result; +} +#endif + +/* BytesEquals */ +static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { +#if CYTHON_COMPILING_IN_PYPY + return PyObject_RichCompareBool(s1, s2, equals); +#else + if (s1 == s2) { + return (equals == Py_EQ); + } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { + const char *ps1, *ps2; + Py_ssize_t length = PyBytes_GET_SIZE(s1); + if (length != PyBytes_GET_SIZE(s2)) + return (equals == Py_NE); + ps1 = PyBytes_AS_STRING(s1); + ps2 = PyBytes_AS_STRING(s2); + if (ps1[0] != ps2[0]) { + return (equals == Py_NE); + } else if (length == 1) { + return (equals == Py_EQ); + } else { + int result; +#if CYTHON_USE_UNICODE_INTERNALS + Py_hash_t hash1, hash2; + hash1 = ((PyBytesObject*)s1)->ob_shash; + hash2 = ((PyBytesObject*)s2)->ob_shash; + if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { + return (equals == Py_NE); + } +#endif + result = memcmp(ps1, ps2, (size_t)length); + return (equals == Py_EQ) ? 
(result == 0) : (result != 0); + } + } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { + return (equals == Py_NE); + } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { + return (equals == Py_NE); + } else { + int result; + PyObject* py_result = PyObject_RichCompare(s1, s2, equals); + if (!py_result) + return -1; + result = __Pyx_PyObject_IsTrue(py_result); + Py_DECREF(py_result); + return result; + } +#endif +} + +/* UnicodeEquals */ +static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { +#if CYTHON_COMPILING_IN_PYPY + return PyObject_RichCompareBool(s1, s2, equals); +#else +#if PY_MAJOR_VERSION < 3 + PyObject* owned_ref = NULL; +#endif + int s1_is_unicode, s2_is_unicode; + if (s1 == s2) { + goto return_eq; + } + s1_is_unicode = PyUnicode_CheckExact(s1); + s2_is_unicode = PyUnicode_CheckExact(s2); +#if PY_MAJOR_VERSION < 3 + if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { + owned_ref = PyUnicode_FromObject(s2); + if (unlikely(!owned_ref)) + return -1; + s2 = owned_ref; + s2_is_unicode = 1; + } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { + owned_ref = PyUnicode_FromObject(s1); + if (unlikely(!owned_ref)) + return -1; + s1 = owned_ref; + s1_is_unicode = 1; + } else if (((!s2_is_unicode) & (!s1_is_unicode))) { + return __Pyx_PyBytes_Equals(s1, s2, equals); + } +#endif + if (s1_is_unicode & s2_is_unicode) { + Py_ssize_t length; + int kind; + void *data1, *data2; + if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) + return -1; + length = __Pyx_PyUnicode_GET_LENGTH(s1); + if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { + goto return_ne; + } +#if CYTHON_USE_UNICODE_INTERNALS + { + Py_hash_t hash1, hash2; + #if CYTHON_PEP393_ENABLED + hash1 = ((PyASCIIObject*)s1)->hash; + hash2 = ((PyASCIIObject*)s2)->hash; + #else + hash1 = ((PyUnicodeObject*)s1)->hash; + hash2 = ((PyUnicodeObject*)s2)->hash; + #endif + if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { + goto return_ne; + } + } +#endif + kind = __Pyx_PyUnicode_KIND(s1); + if (kind != __Pyx_PyUnicode_KIND(s2)) { + goto return_ne; + } + data1 = __Pyx_PyUnicode_DATA(s1); + data2 = __Pyx_PyUnicode_DATA(s2); + if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { + goto return_ne; + } else if (length == 1) { + goto return_eq; + } else { + int result = memcmp(data1, data2, (size_t)(length * kind)); + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + return (equals == Py_EQ) ? 
(result == 0) : (result != 0); + } + } else if ((s1 == Py_None) & s2_is_unicode) { + goto return_ne; + } else if ((s2 == Py_None) & s1_is_unicode) { + goto return_ne; + } else { + int result; + PyObject* py_result = PyObject_RichCompare(s1, s2, equals); + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + if (!py_result) + return -1; + result = __Pyx_PyObject_IsTrue(py_result); + Py_DECREF(py_result); + return result; + } +return_eq: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + return (equals == Py_EQ); +return_ne: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + return (equals == Py_NE); +#endif +} + +/* None */ +static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) { + Py_ssize_t q = a / b; + Py_ssize_t r = a - q*b; + q -= ((r != 0) & ((r ^ b) < 0)); + return q; +} + +/* GetAttr */ +static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { +#if CYTHON_USE_TYPE_SLOTS +#if PY_MAJOR_VERSION >= 3 + if (likely(PyUnicode_Check(n))) +#else + if (likely(PyString_Check(n))) +#endif + return __Pyx_PyObject_GetAttrStr(o, n); +#endif + return PyObject_GetAttr(o, n); +} + +/* GetItemInt */ +static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { + PyObject *r; + if (!j) return NULL; + r = PyObject_GetItem(o, j); + Py_DECREF(j); + return r; +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + Py_ssize_t wrapped_i = i; + if (wraparound & unlikely(i < 0)) { + wrapped_i += PyList_GET_SIZE(o); + } + if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { + PyObject *r = PyList_GET_ITEM(o, wrapped_i); + Py_INCREF(r); + return r; + } + return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); +#else + return PySequence_GetItem(o, i); +#endif +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + Py_ssize_t wrapped_i = i; + if (wraparound & unlikely(i < 0)) { + wrapped_i += PyTuple_GET_SIZE(o); + } + if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { + PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); + Py_INCREF(r); + return r; + } + return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); +#else + return PySequence_GetItem(o, i); +#endif +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS + if (is_list || PyList_CheckExact(o)) { + Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); + if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { + PyObject *r = PyList_GET_ITEM(o, n); + Py_INCREF(r); + return r; + } + } + else if (PyTuple_CheckExact(o)) { + Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o);
+        if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) {
+            PyObject *r = PyTuple_GET_ITEM(o, n);
+            Py_INCREF(r);
+            return r;
+        }
+    } else {
+        PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
+        if (likely(m && m->sq_item)) {
+            if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
+                Py_ssize_t l = m->sq_length(o);
+                if (likely(l >= 0)) {
+                    i += l;
+                } else {
+                    if (!PyErr_ExceptionMatches(PyExc_OverflowError))
+                        return NULL;
+                    PyErr_Clear();
+                }
+            }
+            return m->sq_item(o, i);
+        }
+    }
+#else
+    if (is_list || PySequence_Check(o)) {
+        return PySequence_GetItem(o, i);
+    }
+#endif
+    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+}
+
+/* ObjectGetItem */
+#if CYTHON_USE_TYPE_SLOTS
+static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) {
+    PyObject *runerr;
+    Py_ssize_t key_value;
+    PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence;
+    if (unlikely(!(m && m->sq_item))) {
+        PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name);
+        return NULL;
+    }
+    key_value = __Pyx_PyIndex_AsSsize_t(index);
+    if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) {
+        return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1);
+    }
+    if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) {
+        PyErr_Clear();
+        PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name);
+    }
+    return NULL;
+}
+static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) {
+    PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping;
+    if (likely(m && m->mp_subscript)) {
+        return m->mp_subscript(obj, key);
+    }
+    return __Pyx_PyObject_GetIndex(obj, key);
+}
+#endif
+
+/* decode_c_string */
+static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
+         const char* cstring, Py_ssize_t start, Py_ssize_t stop,
+         const char* encoding, const char* errors,
+         PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
+    Py_ssize_t length;
+    if (unlikely((start < 0) | (stop < 0))) {
+        size_t slen = strlen(cstring);
+        if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) {
+            PyErr_SetString(PyExc_OverflowError,
+                            "c-string too long to convert to Python");
+            return NULL;
+        }
+        length = (Py_ssize_t) slen;
+        if (start < 0) {
+            start += length;
+            if (start < 0)
+                start = 0;
+        }
+        if (stop < 0)
+            stop += length;
+    }
+    if (unlikely(stop <= start))
+        return __Pyx_NewRef(__pyx_empty_unicode);
+    length = stop - start;
+    cstring += start;
+    if (decode_func) {
+        return decode_func(cstring, length, errors);
+    } else {
+        return PyUnicode_Decode(cstring, length, encoding, errors);
+    }
+}
+
+/* PyErrExceptionMatches */
+#if CYTHON_FAST_THREAD_STATE
+static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
+    Py_ssize_t i, n;
+    n = PyTuple_GET_SIZE(tuple);
+#if PY_MAJOR_VERSION >= 3
+    for (i=0; i<n; i++) {
+        if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
+    }
+#endif
+    for (i=0; i<n; i++) {
+        if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
+    }
+    return 0;
+}
+static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
+    PyObject *exc_type = tstate->curexc_type;
+    if (exc_type == err) return 1;
+    if (unlikely(!exc_type)) return 0;
+    if (unlikely(PyTuple_Check(err)))
+        return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
+    return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
+}
+#endif
+
+/* GetAttr3 */
+static PyObject *__Pyx_GetAttr3Default(PyObject *d) {
+    __Pyx_PyThreadState_declare
+    __Pyx_PyThreadState_assign
+    if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
+        return NULL;
+    __Pyx_PyErr_Clear();
+    Py_INCREF(d);
+    return d;
+}
+static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) {
+    PyObject *r = __Pyx_GetAttr(o, n);
+    return (likely(r)) ?
r : __Pyx_GetAttr3Default(d); +} + +/* PyDictVersioning */ +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; +} +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { + PyObject **dictptr = NULL; + Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; + if (offset) { +#if CYTHON_COMPILING_IN_CPYTHON + dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); +#else + dictptr = _PyObject_GetDictPtr(obj); +#endif + } + return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; +} +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) + return 0; + return obj_dict_version == __Pyx_get_object_dict_version(obj); +} +#endif + +/* GetModuleGlobalName */ +#if CYTHON_USE_DICT_VERSIONS +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) +#else +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) +#endif +{ + PyObject *result; +#if !CYTHON_AVOID_BORROWED_REFS +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 + result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } else if (unlikely(PyErr_Occurred())) { + return NULL; + } +#else + result = PyDict_GetItem(__pyx_d, name); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } +#endif +#else + result = PyObject_GetItem(__pyx_d, name); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } + PyErr_Clear(); +#endif + return __Pyx_GetBuiltinName(name); +} + +/* RaiseTooManyValuesToUnpack */ +static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { + PyErr_Format(PyExc_ValueError, + "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); +} + +/* RaiseNeedMoreValuesToUnpack */ +static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { + PyErr_Format(PyExc_ValueError, + "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", + index, (index == 1) ? 
"" : "s"); +} + +/* RaiseNoneIterError */ +static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); +} + +/* ExtTypeTest */ +static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { + if (unlikely(!type)) { + PyErr_SetString(PyExc_SystemError, "Missing type object"); + return 0; + } + if (likely(__Pyx_TypeCheck(obj, type))) + return 1; + PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", + Py_TYPE(obj)->tp_name, type->tp_name); + return 0; +} + +/* GetTopmostException */ +#if CYTHON_USE_EXC_INFO_STACK +static _PyErr_StackItem * +__Pyx_PyErr_GetTopmostException(PyThreadState *tstate) +{ + _PyErr_StackItem *exc_info = tstate->exc_info; + while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && + exc_info->previous_item != NULL) + { + exc_info = exc_info->previous_item; + } + return exc_info; +} +#endif + +/* SaveResetException */ +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); + *type = exc_info->exc_type; + *value = exc_info->exc_value; + *tb = exc_info->exc_traceback; + #else + *type = tstate->exc_type; + *value = tstate->exc_value; + *tb = tstate->exc_traceback; + #endif + Py_XINCREF(*type); + Py_XINCREF(*value); + Py_XINCREF(*tb); +} +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type = type; + exc_info->exc_value = value; + exc_info->exc_traceback = tb; + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = type; + tstate->exc_value = value; + tstate->exc_traceback = tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +} +#endif + +/* GetException */ +#if CYTHON_FAST_THREAD_STATE +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) +#endif +{ + PyObject *local_type, *local_value, *local_tb; +#if CYTHON_FAST_THREAD_STATE + PyObject *tmp_type, *tmp_value, *tmp_tb; + local_type = tstate->curexc_type; + local_value = tstate->curexc_value; + local_tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +#else + PyErr_Fetch(&local_type, &local_value, &local_tb); +#endif + PyErr_NormalizeException(&local_type, &local_value, &local_tb); +#if CYTHON_FAST_THREAD_STATE + if (unlikely(tstate->curexc_type)) +#else + if (unlikely(PyErr_Occurred())) +#endif + goto bad; + #if PY_MAJOR_VERSION >= 3 + if (local_tb) { + if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) + goto bad; + } + #endif + Py_XINCREF(local_tb); + Py_XINCREF(local_type); + Py_XINCREF(local_value); + *type = local_type; + *value = local_value; + *tb = local_tb; +#if CYTHON_FAST_THREAD_STATE + #if CYTHON_USE_EXC_INFO_STACK + { + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = 
exc_info->exc_traceback; + exc_info->exc_type = local_type; + exc_info->exc_value = local_value; + exc_info->exc_traceback = local_tb; + } + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = local_type; + tstate->exc_value = local_value; + tstate->exc_traceback = local_tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +#else + PyErr_SetExcInfo(local_type, local_value, local_tb); +#endif + return 0; +bad: + *type = 0; + *value = 0; + *tb = 0; + Py_XDECREF(local_type); + Py_XDECREF(local_value); + Py_XDECREF(local_tb); + return -1; +} + +/* SwapException */ +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type = *type; + exc_info->exc_value = *value; + exc_info->exc_traceback = *tb; + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = *type; + tstate->exc_value = *value; + tstate->exc_traceback = *tb; + #endif + *type = tmp_type; + *value = tmp_value; + *tb = tmp_tb; +} +#else +static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); + PyErr_SetExcInfo(*type, *value, *tb); + *type = tmp_type; + *value = tmp_value; + *tb = tmp_tb; +} +#endif + +/* Import */ +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { + PyObject *empty_list = 0; + PyObject *module = 0; + PyObject *global_dict = 0; + PyObject *empty_dict = 0; + PyObject *list; + #if PY_MAJOR_VERSION < 3 + PyObject *py_import; + py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); + if (!py_import) + goto bad; + #endif + if (from_list) + list = from_list; + else { + empty_list = PyList_New(0); + if (!empty_list) + goto bad; + list = empty_list; + } + global_dict = PyModule_GetDict(__pyx_m); + if (!global_dict) + goto bad; + empty_dict = PyDict_New(); + if (!empty_dict) + goto bad; + { + #if PY_MAJOR_VERSION >= 3 + if (level == -1) { + if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, 1); + if (!module) { + if (!PyErr_ExceptionMatches(PyExc_ImportError)) + goto bad; + PyErr_Clear(); + } + } + level = 0; + } + #endif + if (!module) { + #if PY_MAJOR_VERSION < 3 + PyObject *py_level = PyInt_FromLong(level); + if (!py_level) + goto bad; + module = PyObject_CallFunctionObjArgs(py_import, + name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); + Py_DECREF(py_level); + #else + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, level); + #endif + } + } +bad: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(py_import); + #endif + Py_XDECREF(empty_list); + Py_XDECREF(empty_dict); + return module; +} + +/* FastTypeChecks */ +#if CYTHON_COMPILING_IN_CPYTHON +static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { + while (a) { + a = a->tp_base; + if (a == b) + return 1; + } + return b == &PyBaseObject_Type; +} +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { + PyObject *mro; + if (a == b) return 1; + mro = 
a->tp_mro;
+    if (likely(mro)) {
+        Py_ssize_t i, n;
+        n = PyTuple_GET_SIZE(mro);
+        for (i = 0; i < n; i++) {
+            if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
+                return 1;
+        }
+        return 0;
+    }
+    return __Pyx_InBases(a, b);
+}
+#if PY_MAJOR_VERSION == 2
+static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
+    PyObject *exception, *value, *tb;
+    int res;
+    __Pyx_PyThreadState_declare
+    __Pyx_PyThreadState_assign
+    __Pyx_ErrFetch(&exception, &value, &tb);
+    res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
+    if (unlikely(res == -1)) {
+        PyErr_WriteUnraisable(err);
+        res = 0;
+    }
+    if (!res) {
+        res = PyObject_IsSubclass(err, exc_type2);
+        if (unlikely(res == -1)) {
+            PyErr_WriteUnraisable(err);
+            res = 0;
+        }
+    }
+    __Pyx_ErrRestore(exception, value, tb);
+    return res;
+}
+#else
+static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
+    int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
+    if (!res) {
+        res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
+    }
+    return res;
+}
+#endif
+static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
+    Py_ssize_t i, n;
+    assert(PyExceptionClass_Check(exc_type));
+    n = PyTuple_GET_SIZE(tuple);
+#if PY_MAJOR_VERSION >= 3
+    for (i=0; i<n; i++) {
+        if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
+    }
+#endif
+    for (i=0; i<n; i++) {
+        PyObject *t = PyTuple_GET_ITEM(tuple, i);
+        #if PY_MAJOR_VERSION < 3
+        if (likely(exc_type == t)) return 1;
+        #endif
+        if (likely(PyExceptionClass_Check(t))) {
+            if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
+        } else {
+        }
+    }
+    return 0;
+}
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
+    if (likely(err == exc_type)) return 1;
+    if (likely(PyExceptionClass_Check(err))) {
+        if (likely(PyExceptionClass_Check(exc_type))) {
+            return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
+        } else if (likely(PyTuple_Check(exc_type))) {
+            return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
+        } else {
+        }
+    }
+    return PyObject_IsSubclass(err, exc_type);
+}
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
+    assert(PyExceptionClass_Check(exc_type1));
+    assert(PyExceptionClass_Check(exc_type2));
+    if (likely(err == exc_type1 || err == exc_type2)) return 1;
+    if (likely(PyExceptionClass_Check(err))) {
+        return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
+    }
+    return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
+}
+#endif
+
+/* PyIntBinop */
+#if !CYTHON_COMPILING_IN_PYPY
+static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) {
+    (void)inplace;
+    (void)zerodivision_check;
+    #if PY_MAJOR_VERSION < 3
+    if (likely(PyInt_CheckExact(op1))) {
+        const long b = intval;
+        long x;
+        long a = PyInt_AS_LONG(op1);
+            x = (long)((unsigned long)a + b);
+            if (likely((x^a) >= 0 || (x^b) >= 0))
+                return PyInt_FromLong(x);
+            return PyLong_Type.tp_as_number->nb_add(op1, op2);
+    }
+    #endif
+    #if CYTHON_USE_PYLONG_INTERNALS
+    if (likely(PyLong_CheckExact(op1))) {
+        const long b = intval;
+        long a, x;
+#ifdef HAVE_LONG_LONG
+        const PY_LONG_LONG llb = intval;
+        PY_LONG_LONG lla, llx;
+#endif
+        const digit* digits = ((PyLongObject*)op1)->ob_digit;
+        const Py_ssize_t size = Py_SIZE(op1);
+        if (likely(__Pyx_sst_abs(size) <= 1)) {
+            a = likely(size) ?
digits[0] : 0; + if (size == -1) a = -a; + } else { + switch (size) { + case -2: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { + lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + case 2: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { + lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + case -3: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { + lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + case 3: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { + lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + case -4: + if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { + lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + case 4: + if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { + lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + default: return PyLong_Type.tp_as_number->nb_add(op1, op2); + } + } + x = a + b; + return PyLong_FromLong(x); +#ifdef HAVE_LONG_LONG + long_long: + llx = lla + llb; + return PyLong_FromLongLong(llx); +#endif + + + } + #endif + if (PyFloat_CheckExact(op1)) { + const long b = intval; + double a = PyFloat_AS_DOUBLE(op1); + double result; + PyFPE_START_PROTECT("add", return NULL) + 
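/* op1 is an exact float here: perform the addition directly in C double arithmetic instead of dispatching through PyNumber_Add */ +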
result = ((double)a) + (double)b; + PyFPE_END_PROTECT(result) + return PyFloat_FromDouble(result); + } + return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); +} +#endif + +/* None */ +static CYTHON_INLINE long __Pyx_div_long(long a, long b) { + long q = a / b; + long r = a - q*b; + q -= ((r != 0) & ((r ^ b) < 0)); + return q; +} + +/* ImportFrom */ +static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { + PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); + if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Format(PyExc_ImportError, + #if PY_MAJOR_VERSION < 3 + "cannot import name %.230s", PyString_AS_STRING(name)); + #else + "cannot import name %S", name); + #endif + } + return value; +} + +/* HasAttr */ +static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { + PyObject *r; + if (unlikely(!__Pyx_PyBaseString_Check(n))) { + PyErr_SetString(PyExc_TypeError, + "hasattr(): attribute name must be string"); + return -1; + } + r = __Pyx_GetAttr(o, n); + if (unlikely(!r)) { + PyErr_Clear(); + return 0; + } else { + Py_DECREF(r); + return 1; + } +} + +/* PyObject_GenericGetAttrNoDict */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { + PyErr_Format(PyExc_AttributeError, +#if PY_MAJOR_VERSION >= 3 + "'%.50s' object has no attribute '%U'", + tp->tp_name, attr_name); +#else + "'%.50s' object has no attribute '%.400s'", + tp->tp_name, PyString_AS_STRING(attr_name)); +#endif + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { + PyObject *descr; + PyTypeObject *tp = Py_TYPE(obj); + if (unlikely(!PyString_Check(attr_name))) { + return PyObject_GenericGetAttr(obj, attr_name); + } + assert(!tp->tp_dictoffset); + descr = _PyType_Lookup(tp, attr_name); + if (unlikely(!descr)) { + return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); + } + Py_INCREF(descr); + #if PY_MAJOR_VERSION < 3 + if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) + #endif + { + descrgetfunc f = Py_TYPE(descr)->tp_descr_get; + if (unlikely(f)) { + PyObject *res = f(descr, obj, (PyObject *)tp); + Py_DECREF(descr); + return res; + } + } + return descr; +} +#endif + +/* PyObject_GenericGetAttr */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { + if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { + return PyObject_GenericGetAttr(obj, attr_name); + } + return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); +} +#endif + +/* SetVTable */ +static int __Pyx_SetVtable(PyObject *dict, void *vtable) { +#if PY_VERSION_HEX >= 0x02070000 + PyObject *ob = PyCapsule_New(vtable, 0, 0); +#else + PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); +#endif + if (!ob) + goto bad; + if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) + goto bad; + Py_DECREF(ob); + return 0; +bad: + Py_XDECREF(ob); + return -1; +} + +/* PyObjectGetAttrStrNoError */ +static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) + __Pyx_PyErr_Clear(); +} +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { + PyObject *result; +#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS 
&& PY_VERSION_HEX >= 0x030700B1 + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { + return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); + } +#endif + result = __Pyx_PyObject_GetAttrStr(obj, attr_name); + if (unlikely(!result)) { + __Pyx_PyObject_GetAttrStr_ClearAttributeError(); + } + return result; +} + +/* SetupReduce */ +static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { + int ret; + PyObject *name_attr; + name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2); + if (likely(name_attr)) { + ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); + } else { + ret = -1; + } + if (unlikely(ret < 0)) { + PyErr_Clear(); + ret = 0; + } + Py_XDECREF(name_attr); + return ret; +} +static int __Pyx_setup_reduce(PyObject* type_obj) { + int ret = 0; + PyObject *object_reduce = NULL; + PyObject *object_reduce_ex = NULL; + PyObject *reduce = NULL; + PyObject *reduce_ex = NULL; + PyObject *reduce_cython = NULL; + PyObject *setstate = NULL; + PyObject *setstate_cython = NULL; +#if CYTHON_USE_PYTYPE_LOOKUP + if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; +#else + if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; +#endif +#if CYTHON_USE_PYTYPE_LOOKUP + object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; +#else + object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; +#endif + reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD; + if (reduce_ex == object_reduce_ex) { +#if CYTHON_USE_PYTYPE_LOOKUP + object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; +#else + object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; +#endif + reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD; + if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { + reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython); + if (likely(reduce_cython)) { + ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; + ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; + } else if (reduce == object_reduce || PyErr_Occurred()) { + goto __PYX_BAD; + } + setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); + if (!setstate) PyErr_Clear(); + if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { + setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython); + if (likely(setstate_cython)) { + ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; + ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; + } else if (!setstate || PyErr_Occurred()) { + goto __PYX_BAD; + } + } + PyType_Modified((PyTypeObject*)type_obj); + } + } + goto __PYX_GOOD; +__PYX_BAD: + if (!PyErr_Occurred()) + PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); + ret = -1; +__PYX_GOOD: 
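+    /* Under CYTHON_USE_PYTYPE_LOOKUP, object_reduce and object_reduce_ex come
+       from _PyType_Lookup(), which returns borrowed references, so they must
+       not be released here; the GetAttr fallback path returns new references. */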
+#if !CYTHON_USE_PYTYPE_LOOKUP + Py_XDECREF(object_reduce); + Py_XDECREF(object_reduce_ex); +#endif + Py_XDECREF(reduce); + Py_XDECREF(reduce_ex); + Py_XDECREF(reduce_cython); + Py_XDECREF(setstate); + Py_XDECREF(setstate_cython); + return ret; +} + +/* CLineInTraceback */ +#ifndef CYTHON_CLINE_IN_TRACEBACK +static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { + PyObject *use_cline; + PyObject *ptype, *pvalue, *ptraceback; +#if CYTHON_COMPILING_IN_CPYTHON + PyObject **cython_runtime_dict; +#endif + if (unlikely(!__pyx_cython_runtime)) { + return c_line; + } + __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); +#if CYTHON_COMPILING_IN_CPYTHON + cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); + if (likely(cython_runtime_dict)) { + __PYX_PY_DICT_LOOKUP_IF_MODIFIED( + use_cline, *cython_runtime_dict, + __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) + } else +#endif + { + PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); + if (use_cline_obj) { + use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; + Py_DECREF(use_cline_obj); + } else { + PyErr_Clear(); + use_cline = NULL; + } + } + if (!use_cline) { + c_line = 0; + PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); + } + else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { + c_line = 0; + } + __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); + return c_line; +} +#endif + +/* CodeObjectCache */ +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { + int start = 0, mid = 0, end = count - 1; + if (end >= 0 && code_line > entries[end].code_line) { + return count; + } + while (start < end) { + mid = start + (end - start) / 2; + if (code_line < entries[mid].code_line) { + end = mid; + } else if (code_line > entries[mid].code_line) { + start = mid + 1; + } else { + return mid; + } + } + if (code_line <= entries[mid].code_line) { + return mid; + } else { + return mid + 1; + } +} +static PyCodeObject *__pyx_find_code_object(int code_line) { + PyCodeObject* code_object; + int pos; + if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { + return NULL; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { + return NULL; + } + code_object = __pyx_code_cache.entries[pos].code_object; + Py_INCREF(code_object); + return code_object; +} +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { + int pos, i; + __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; + if (unlikely(!code_line)) { + return; + } + if (unlikely(!entries)) { + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); + if (likely(entries)) { + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = 64; + __pyx_code_cache.count = 1; + entries[0].code_line = code_line; + entries[0].code_object = code_object; + Py_INCREF(code_object); + } + return; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { + PyCodeObject* tmp = entries[pos].code_object; + entries[pos].code_object = code_object; + Py_DECREF(tmp); + 
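+        /* An entry for this line already exists: swap in the new code object
+           and drop the reference that was held to the old one. */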
return; + } + if (__pyx_code_cache.count == __pyx_code_cache.max_count) { + int new_max = __pyx_code_cache.max_count + 64; + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( + __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); + if (unlikely(!entries)) { + return; + } + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = new_max; + } + for (i=__pyx_code_cache.count; i>pos; i--) { + entries[i] = entries[i-1]; + } + entries[pos].code_line = code_line; + entries[pos].code_object = code_object; + __pyx_code_cache.count++; + Py_INCREF(code_object); +} + +/* AddTraceback */ +#include "compile.h" +#include "frameobject.h" +#include "traceback.h" +static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( + const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyObject *py_srcfile = 0; + PyObject *py_funcname = 0; + #if PY_MAJOR_VERSION < 3 + py_srcfile = PyString_FromString(filename); + #else + py_srcfile = PyUnicode_FromString(filename); + #endif + if (!py_srcfile) goto bad; + if (c_line) { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #else + py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #endif + } + else { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromString(funcname); + #else + py_funcname = PyUnicode_FromString(funcname); + #endif + } + if (!py_funcname) goto bad; + py_code = __Pyx_PyCode_New( + 0, + 0, + 0, + 0, + 0, + __pyx_empty_bytes, /*PyObject *code,*/ + __pyx_empty_tuple, /*PyObject *consts,*/ + __pyx_empty_tuple, /*PyObject *names,*/ + __pyx_empty_tuple, /*PyObject *varnames,*/ + __pyx_empty_tuple, /*PyObject *freevars,*/ + __pyx_empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + py_line, + __pyx_empty_bytes /*PyObject *lnotab*/ + ); + Py_DECREF(py_srcfile); + Py_DECREF(py_funcname); + return py_code; +bad: + Py_XDECREF(py_srcfile); + Py_XDECREF(py_funcname); + return NULL; +} +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyFrameObject *py_frame = 0; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + if (c_line) { + c_line = __Pyx_CLineForTraceback(tstate, c_line); + } + py_code = __pyx_find_code_object(c_line ? -c_line : py_line); + if (!py_code) { + py_code = __Pyx_CreateCodeObjectForTraceback( + funcname, c_line, py_line, filename); + if (!py_code) goto bad; + __pyx_insert_code_object(c_line ? 
-c_line : py_line, py_code); + } + py_frame = PyFrame_New( + tstate, /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + __pyx_d, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + __Pyx_PyFrame_SetLineNumber(py_frame, py_line); + PyTraceBack_Here(py_frame); +bad: + Py_XDECREF(py_code); + Py_XDECREF(py_frame); +} + +#if PY_MAJOR_VERSION < 3 +static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { + if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); + if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); + if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); + PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); + return -1; +} +static void __Pyx_ReleaseBuffer(Py_buffer *view) { + PyObject *obj = view->obj; + if (!obj) return; + if (PyObject_CheckBuffer(obj)) { + PyBuffer_Release(view); + return; + } + if ((0)) {} + view->obj = NULL; + Py_DECREF(obj); +} +#endif + + +/* MemviewSliceIsContig */ +static int +__pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) +{ + int i, index, step, start; + Py_ssize_t itemsize = mvs.memview->view.itemsize; + if (order == 'F') { + step = 1; + start = 0; + } else { + step = -1; + start = ndim - 1; + } + for (i = 0; i < ndim; i++) { + index = start + step * i; + if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) + return 0; + itemsize *= mvs.shape[index]; + } + return 1; +} + +/* OverlappingSlices */ +static void +__pyx_get_array_memory_extents(__Pyx_memviewslice *slice, + void **out_start, void **out_end, + int ndim, size_t itemsize) +{ + char *start, *end; + int i; + start = end = slice->data; + for (i = 0; i < ndim; i++) { + Py_ssize_t stride = slice->strides[i]; + Py_ssize_t extent = slice->shape[i]; + if (extent == 0) { + *out_start = *out_end = start; + return; + } else { + if (stride > 0) + end += stride * (extent - 1); + else + start += stride * (extent - 1); + } + } + *out_start = start; + *out_end = end + itemsize; +} +static int +__pyx_slices_overlap(__Pyx_memviewslice *slice1, + __Pyx_memviewslice *slice2, + int ndim, size_t itemsize) +{ + void *start1, *end1, *start2, *end2; + __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); + __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); + return (start1 < end2) && (start2 < end1); +} + +/* Capsule */ +static CYTHON_INLINE PyObject * +__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig) +{ + PyObject *cobj; +#if PY_VERSION_HEX >= 0x02070000 + cobj = PyCapsule_New(p, sig, NULL); +#else + cobj = PyCObject_FromVoidPtr(p, NULL); +#endif + return cobj; +} + +/* IsLittleEndian */ +static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) +{ + union { + uint32_t u32; + uint8_t u8[4]; + } S; + S.u32 = 0x01020304; + return S.u8[0] == 4; +} + +/* BufferFormatCheck */ +static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, + __Pyx_BufFmt_StackElem* stack, + __Pyx_TypeInfo* type) { + stack[0].field = &ctx->root; + stack[0].parent_offset = 0; + ctx->root.type = type; + ctx->root.name = "buffer dtype"; + ctx->root.offset = 0; + ctx->head = stack; + ctx->head->field = &ctx->root; + ctx->fmt_offset = 0; + ctx->head->parent_offset = 0; + ctx->new_packmode = '@'; + ctx->enc_packmode = '@'; + ctx->new_count = 1; + ctx->enc_count = 0; + ctx->enc_type = 0; + ctx->is_complex = 0; + ctx->is_valid_array = 0; + 
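+    /* struct_alignment tracks the alignment of the first member inside a
+       'T{...}' struct scope; the '}' handler uses it to add trailing padding. */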
+    ctx->struct_alignment = 0;
+    while (type->typegroup == 'S') {
+        ++ctx->head;
+        ctx->head->field = type->fields;
+        ctx->head->parent_offset = 0;
+        type = type->fields->type;
+    }
+}
+static int __Pyx_BufFmt_ParseNumber(const char** ts) {
+    int count;
+    const char* t = *ts;
+    if (*t < '0' || *t > '9') {
+        return -1;
+    } else {
+        count = *t++ - '0';
+        while (*t >= '0' && *t <= '9') {
+            count *= 10;
+            count += *t++ - '0';
+        }
+    }
+    *ts = t;
+    return count;
+}
+static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
+    int number = __Pyx_BufFmt_ParseNumber(ts);
+    if (number == -1)
+        PyErr_Format(PyExc_ValueError,
+                     "Does not understand character buffer dtype format string ('%c')", **ts);
+    return number;
+}
+static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
+    PyErr_Format(PyExc_ValueError,
+                 "Unexpected format string character: '%c'", ch);
+}
+static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
+    switch (ch) {
+        case '?': return "'bool'";
+        case 'c': return "'char'";
+        case 'b': return "'signed char'";
+        case 'B': return "'unsigned char'";
+        case 'h': return "'short'";
+        case 'H': return "'unsigned short'";
+        case 'i': return "'int'";
+        case 'I': return "'unsigned int'";
+        case 'l': return "'long'";
+        case 'L': return "'unsigned long'";
+        case 'q': return "'long long'";
+        case 'Q': return "'unsigned long long'";
+        case 'f': return (is_complex ? "'complex float'" : "'float'");
+        case 'd': return (is_complex ? "'complex double'" : "'double'");
+        case 'g': return (is_complex ? "'complex long double'" : "'long double'");
+        case 'T': return "a struct";
+        case 'O': return "Python object";
+        case 'P': return "a pointer";
+        case 's': case 'p': return "a string";
+        case 0: return "end";
+        default: return "unparseable format string";
+    }
+}
+static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
+    switch (ch) {
+        case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
+        case 'h': case 'H': return 2;
+        case 'i': case 'I': case 'l': case 'L': return 4;
+        case 'q': case 'Q': return 8;
+        case 'f': return (is_complex ? 8 : 4);
+        case 'd': return (is_complex ? 16 : 8);
+        case 'g': {
+            PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').");
+            return 0;
+        }
+        case 'O': case 'P': return sizeof(void*);
+        default:
+            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
+            return 0;
+    }
+}
+static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
+    switch (ch) {
+        case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
+        case 'h': case 'H': return sizeof(short);
+        case 'i': case 'I': return sizeof(int);
+        case 'l': case 'L': return sizeof(long);
+#ifdef HAVE_LONG_LONG
+        case 'q': case 'Q': return sizeof(PY_LONG_LONG);
+#endif
+        case 'f': return sizeof(float) * (is_complex ? 2 : 1);
+        case 'd': return sizeof(double) * (is_complex ? 2 : 1);
+        case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
+        case 'O': case 'P': return sizeof(void*);
+        default: {
+            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
+            return 0;
+        }
+    }
+}
+typedef struct { char c; short x; } __Pyx_st_short;
+typedef struct { char c; int x; } __Pyx_st_int;
+typedef struct { char c; long x; } __Pyx_st_long;
+typedef struct { char c; float x; } __Pyx_st_float;
+typedef struct { char c; double x; } __Pyx_st_double;
+typedef struct { char c; long double x; } __Pyx_st_longdouble;
+typedef struct { char c; void *x; } __Pyx_st_void_p;
+#ifdef HAVE_LONG_LONG
+typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
+#endif
+static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
+    switch (ch) {
+        case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
+        case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
+        case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
+        case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
+#ifdef HAVE_LONG_LONG
+        case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
+#endif
+        case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
+        case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
+        case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
+        case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
+        default:
+            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
+            return 0;
+    }
+}
+/* These are for computing the padding at the end of the struct to align
+   on the first member of the struct. This will probably be the same as above,
+   but we don't have any guarantees.
+ */
+typedef struct { short x; char c; } __Pyx_pad_short;
+typedef struct { int x; char c; } __Pyx_pad_int;
+typedef struct { long x; char c; } __Pyx_pad_long;
+typedef struct { float x; char c; } __Pyx_pad_float;
+typedef struct { double x; char c; } __Pyx_pad_double;
+typedef struct { long double x; char c; } __Pyx_pad_longdouble;
+typedef struct { void *x; char c; } __Pyx_pad_void_p;
+#ifdef HAVE_LONG_LONG
+typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
+#endif
+static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
+    switch (ch) {
+        case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
+        case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
+        case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
+        case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
+#ifdef HAVE_LONG_LONG
+        case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
+#endif
+        case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
+        case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
+        case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
+        case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
+        default:
+            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
+            return 0;
+    }
+}
+static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
+    switch (ch) {
+        case 'c':
+            return 'H';
+        case 'b': case 'h': case 'i':
+        case 'l': case 'q': case 's': case 'p':
+            return 'I';
+        case '?': case 'B': case 'H': case 'I': case 'L': case 'Q':
+            return 'U';
+        case 'f': case 'd': case 'g':
+            return (is_complex ?
'C' : 'R'); + case 'O': + return 'O'; + case 'P': + return 'P'; + default: { + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } + } +} +static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { + if (ctx->head == NULL || ctx->head->field == &ctx->root) { + const char* expected; + const char* quote; + if (ctx->head == NULL) { + expected = "end"; + quote = ""; + } else { + expected = ctx->head->field->type->name; + quote = "'"; + } + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch, expected %s%s%s but got %s", + quote, expected, quote, + __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); + } else { + __Pyx_StructField* field = ctx->head->field; + __Pyx_StructField* parent = (ctx->head - 1)->field; + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", + field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), + parent->type->name, field->name); + } +} +static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { + char group; + size_t size, offset, arraysize = 1; + if (ctx->enc_type == 0) return 0; + if (ctx->head->field->type->arraysize[0]) { + int i, ndim = 0; + if (ctx->enc_type == 's' || ctx->enc_type == 'p') { + ctx->is_valid_array = ctx->head->field->type->ndim == 1; + ndim = 1; + if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { + PyErr_Format(PyExc_ValueError, + "Expected a dimension of size %zu, got %zu", + ctx->head->field->type->arraysize[0], ctx->enc_count); + return -1; + } + } + if (!ctx->is_valid_array) { + PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", + ctx->head->field->type->ndim, ndim); + return -1; + } + for (i = 0; i < ctx->head->field->type->ndim; i++) { + arraysize *= ctx->head->field->type->arraysize[i]; + } + ctx->is_valid_array = 0; + ctx->enc_count = 1; + } + group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); + do { + __Pyx_StructField* field = ctx->head->field; + __Pyx_TypeInfo* type = field->type; + if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { + size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); + } else { + size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); + } + if (ctx->enc_packmode == '@') { + size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); + size_t align_mod_offset; + if (align_at == 0) return -1; + align_mod_offset = ctx->fmt_offset % align_at; + if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; + if (ctx->struct_alignment == 0) + ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, + ctx->is_complex); + } + if (type->size != size || type->typegroup != group) { + if (type->typegroup == 'C' && type->fields != NULL) { + size_t parent_offset = ctx->head->parent_offset + field->offset; + ++ctx->head; + ctx->head->field = type->fields; + ctx->head->parent_offset = parent_offset; + continue; + } + if ((type->typegroup == 'H' || group == 'H') && type->size == size) { + } else { + __Pyx_BufFmt_RaiseExpected(ctx); + return -1; + } + } + offset = ctx->head->parent_offset + field->offset; + if (ctx->fmt_offset != offset) { + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", + (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); + return -1; + } + ctx->fmt_offset += size; + if (arraysize) + ctx->fmt_offset += (arraysize - 1) * size; + --ctx->enc_count; + while (1) { + if 
(field == &ctx->root) {
+          ctx->head = NULL;
+          if (ctx->enc_count != 0) {
+            __Pyx_BufFmt_RaiseExpected(ctx);
+            return -1;
+          }
+          break;
+        }
+        ctx->head->field = ++field;
+        if (field->type == NULL) {
+          --ctx->head;
+          field = ctx->head->field;
+          continue;
+        } else if (field->type->typegroup == 'S') {
+          size_t parent_offset = ctx->head->parent_offset + field->offset;
+          if (field->type->fields->type == NULL) continue;
+          field = field->type->fields;
+          ++ctx->head;
+          ctx->head->field = field;
+          ctx->head->parent_offset = parent_offset;
+          break;
+        } else {
+          break;
+        }
+      }
+  } while (ctx->enc_count);
+  ctx->enc_type = 0;
+  ctx->is_complex = 0;
+  return 0;
+}
+static PyObject *
+__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
+{
+    const char *ts = *tsp;
+    int i = 0, number, ndim;
+    ++ts;
+    if (ctx->new_count != 1) {
+        PyErr_SetString(PyExc_ValueError,
+                        "Cannot handle repeated arrays in format string");
+        return NULL;
+    }
+    if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
+    ndim = ctx->head->field->type->ndim;
+    while (*ts && *ts != ')') {
+        switch (*ts) {
+            /* skip whitespace; a bare 'continue' here would never advance
+               ts and therefore loop forever */
+            case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': ++ts; continue;
+            default: break;
+        }
+        number = __Pyx_BufFmt_ExpectNumber(&ts);
+        if (number == -1) return NULL;
+        if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
+            return PyErr_Format(PyExc_ValueError,
+                                "Expected a dimension of size %zu, got %d",
+                                ctx->head->field->type->arraysize[i], number);
+        if (*ts != ',' && *ts != ')')
+            return PyErr_Format(PyExc_ValueError,
+                                "Expected a comma in format string, got '%c'", *ts);
+        if (*ts == ',') ts++;
+        i++;
+    }
+    if (i != ndim)
+        return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
+                            ctx->head->field->type->ndim, i);
+    if (!*ts) {
+        PyErr_SetString(PyExc_ValueError,
+                        "Unexpected end of format string, expected ')'");
+        return NULL;
+    }
+    ctx->is_valid_array = 1;
+    ctx->new_count = 1;
+    *tsp = ++ts;
+    return Py_None;
+}
+static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
+  int got_Z = 0;
+  while (1) {
+    switch(*ts) {
+      case 0:
+        if (ctx->enc_type != 0 && ctx->head == NULL) {
+          __Pyx_BufFmt_RaiseExpected(ctx);
+          return NULL;
+        }
+        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
+        if (ctx->head != NULL) {
+          __Pyx_BufFmt_RaiseExpected(ctx);
+          return NULL;
+        }
+        return ts;
+      case ' ':
+      case '\r':
+      case '\n':
+        ++ts;
+        break;
+      case '<':
+        if (!__Pyx_Is_Little_Endian()) {
+          PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
+          return NULL;
+        }
+        ctx->new_packmode = '=';
+        ++ts;
+        break;
+      case '>':
+      case '!':
+        if (__Pyx_Is_Little_Endian()) {
+          PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
+          return NULL;
+        }
+        ctx->new_packmode = '=';
+        ++ts;
+        break;
+      case '=':
+      case '@':
+      case '^':
+        ctx->new_packmode = *ts++;
+        break;
+      case 'T':
+        {
+          const char* ts_after_sub;
+          size_t i, struct_count = ctx->new_count;
+          size_t struct_alignment = ctx->struct_alignment;
+          ctx->new_count = 1;
+          ++ts;
+          if (*ts != '{') {
+            PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
+            return NULL;
+          }
+          if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
+          ctx->enc_type = 0;
+          ctx->enc_count = 0;
+          ctx->struct_alignment = 0;
+          ++ts;
+          ts_after_sub = ts;
+          for (i = 0; i != struct_count; ++i) {
+            ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
+            if (!ts_after_sub) return NULL;
+          }
+          ts = ts_after_sub;
+          if
(struct_alignment) ctx->struct_alignment = struct_alignment; + } + break; + case '}': + { + size_t alignment = ctx->struct_alignment; + ++ts; + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_type = 0; + if (alignment && ctx->fmt_offset % alignment) { + ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); + } + } + return ts; + case 'x': + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->fmt_offset += ctx->new_count; + ctx->new_count = 1; + ctx->enc_count = 0; + ctx->enc_type = 0; + ctx->enc_packmode = ctx->new_packmode; + ++ts; + break; + case 'Z': + got_Z = 1; + ++ts; + if (*ts != 'f' && *ts != 'd' && *ts != 'g') { + __Pyx_BufFmt_RaiseUnexpectedChar('Z'); + return NULL; + } + CYTHON_FALLTHROUGH; + case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': + case 'l': case 'L': case 'q': case 'Q': + case 'f': case 'd': case 'g': + case 'O': case 'p': + if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) && + (ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) { + ctx->enc_count += ctx->new_count; + ctx->new_count = 1; + got_Z = 0; + ++ts; + break; + } + CYTHON_FALLTHROUGH; + case 's': + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_count = ctx->new_count; + ctx->enc_packmode = ctx->new_packmode; + ctx->enc_type = *ts; + ctx->is_complex = got_Z; + ++ts; + ctx->new_count = 1; + got_Z = 0; + break; + case ':': + ++ts; + while(*ts != ':') ++ts; + ++ts; + break; + case '(': + if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; + break; + default: + { + int number = __Pyx_BufFmt_ExpectNumber(&ts); + if (number == -1) return NULL; + ctx->new_count = (size_t)number; + } + } + } +} + +/* TypeInfoCompare */ + static int +__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) +{ + int i; + if (!a || !b) + return 0; + if (a == b) + return 1; + if (a->size != b->size || a->typegroup != b->typegroup || + a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { + if (a->typegroup == 'H' || b->typegroup == 'H') { + return a->size == b->size; + } else { + return 0; + } + } + if (a->ndim) { + for (i = 0; i < a->ndim; i++) + if (a->arraysize[i] != b->arraysize[i]) + return 0; + } + if (a->typegroup == 'S') { + if (a->flags != b->flags) + return 0; + if (a->fields || b->fields) { + if (!(a->fields && b->fields)) + return 0; + for (i = 0; a->fields[i].type && b->fields[i].type; i++) { + __Pyx_StructField *field_a = a->fields + i; + __Pyx_StructField *field_b = b->fields + i; + if (field_a->offset != field_b->offset || + !__pyx_typeinfo_cmp(field_a->type, field_b->type)) + return 0; + } + return !a->fields[i].type && !b->fields[i].type; + } + } + return 1; +} + +/* MemviewSliceValidateAndInit */ + static int +__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) +{ + if (buf->shape[dim] <= 1) + return 1; + if (buf->strides) { + if (spec & __Pyx_MEMVIEW_CONTIG) { + if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { + if (unlikely(buf->strides[dim] != sizeof(void *))) { + PyErr_Format(PyExc_ValueError, + "Buffer is not indirectly contiguous " + "in dimension %d.", dim); + goto fail; + } + } else if (unlikely(buf->strides[dim] != buf->itemsize)) { + PyErr_SetString(PyExc_ValueError, + "Buffer and memoryview are not contiguous " + "in the same dimension."); + goto fail; + } + } + if (spec & __Pyx_MEMVIEW_FOLLOW) { + Py_ssize_t stride = buf->strides[dim]; + if (stride < 0) + stride = -stride; + if (unlikely(stride < buf->itemsize)) { + PyErr_SetString(PyExc_ValueError, + "Buffer and 
memoryview are not contiguous "
+                        "in the same dimension.");
+        goto fail;
+      }
+    }
+  } else {
+    if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) {
+      PyErr_Format(PyExc_ValueError,
+                   "C-contiguous buffer is not contiguous in "
+                   "dimension %d", dim);
+      goto fail;
+    } else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) {
+      PyErr_Format(PyExc_ValueError,
+                   "C-contiguous buffer is not indirect in "
+                   "dimension %d", dim);
+      goto fail;
+    } else if (unlikely(buf->suboffsets)) {
+      PyErr_SetString(PyExc_ValueError,
+                      "Buffer exposes suboffsets but no strides");
+      goto fail;
+    }
+  }
+  return 1;
+fail:
+  return 0;
+}
+static int
+__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec)
+{
+  if (spec & __Pyx_MEMVIEW_DIRECT) {
+    if (unlikely(buf->suboffsets && buf->suboffsets[dim] >= 0)) {
+      PyErr_Format(PyExc_ValueError,
+                   "Buffer not compatible with direct access "
+                   "in dimension %d.", dim);
+      goto fail;
+    }
+  }
+  if (spec & __Pyx_MEMVIEW_PTR) {
+    if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) {
+      PyErr_Format(PyExc_ValueError,
+                   "Buffer is not indirectly accessible "
+                   "in dimension %d.", dim);
+      goto fail;
+    }
+  }
+  return 1;
+fail:
+  return 0;
+}
+static int
+__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag)
+{
+  int i;
+  if (c_or_f_flag & __Pyx_IS_F_CONTIG) {
+    Py_ssize_t stride = 1;
+    for (i = 0; i < ndim; i++) {
+      if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) {
+        PyErr_SetString(PyExc_ValueError,
+                        "Buffer not fortran contiguous.");
+        goto fail;
+      }
+      stride = stride * buf->shape[i];
+    }
+  } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) {
+    Py_ssize_t stride = 1;
+    for (i = ndim - 1; i > -1; i--) {
+      if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) {
+        PyErr_SetString(PyExc_ValueError,
+                        "Buffer not C contiguous.");
+        goto fail;
+      }
+      stride = stride * buf->shape[i];
+    }
+  }
+  return 1;
+fail:
+  return 0;
+}
+static int __Pyx_ValidateAndInit_memviewslice(
+                int *axes_specs,
+                int c_or_f_flag,
+                int buf_flags,
+                int ndim,
+                __Pyx_TypeInfo *dtype,
+                __Pyx_BufFmt_StackElem stack[],
+                __Pyx_memviewslice *memviewslice,
+                PyObject *original_obj)
+{
+    struct __pyx_memoryview_obj *memview, *new_memview;
+    __Pyx_RefNannyDeclarations
+    Py_buffer *buf;
+    int i, spec = 0, retval = -1;
+    __Pyx_BufFmt_Context ctx;
+    int from_memoryview = __pyx_memoryview_check(original_obj);
+    __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0);
+    if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *)
+                                                original_obj)->typeinfo)) {
+        memview = (struct __pyx_memoryview_obj *) original_obj;
+        new_memview = NULL;
+    } else {
+        memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
+                                            original_obj, buf_flags, 0, dtype);
+        new_memview = memview;
+        if (unlikely(!memview))
+            goto fail;
+    }
+    buf = &memview->view;
+    if (unlikely(buf->ndim != ndim)) {
+        PyErr_Format(PyExc_ValueError,
+                "Buffer has wrong number of dimensions (expected %d, got %d)",
+                ndim, buf->ndim);
+        goto fail;
+    }
+    if (new_memview) {
+        __Pyx_BufFmt_Init(&ctx, stack, dtype);
+        if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail;
+    }
+    if (unlikely((unsigned) buf->itemsize != dtype->size)) {
+        PyErr_Format(PyExc_ValueError,
+                     "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) "
+                     "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)",
+                     buf->itemsize,
+                     (buf->itemsize > 1) ? "s" : "",
+                     dtype->name,
+                     dtype->size,
+                     (dtype->size > 1) ?
"s" : ""); + goto fail; + } + if (buf->len > 0) { + for (i = 0; i < ndim; i++) { + spec = axes_specs[i]; + if (unlikely(!__pyx_check_strides(buf, i, ndim, spec))) + goto fail; + if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec))) + goto fail; + } + if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))) + goto fail; + } + if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, + new_memview != NULL) == -1)) { + goto fail; + } + retval = 0; + goto no_fail; +fail: + Py_XDECREF(new_memview); + retval = -1; +no_fail: + __Pyx_RefNannyFinishContext(); + return retval; +} + +/* ObjectToMemviewSlice */ + static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(PyObject *obj, int writable_flag) { + __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_BufFmt_StackElem stack[1]; + int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; + int retcode; + if (obj == Py_None) { + result.memview = (struct __pyx_memoryview_obj *) Py_None; + return result; + } + retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, + (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 3, + &__Pyx_TypeInfo_int, stack, + &result, obj); + if (unlikely(retcode == -1)) + goto __pyx_fail; + return result; +__pyx_fail: + result.memview = NULL; + result.data = NULL; + return result; +} + +/* ObjectToMemviewSlice */ + static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *obj, int writable_flag) { + __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_BufFmt_StackElem stack[1]; + int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; + int retcode; + if (obj == Py_None) { + result.memview = (struct __pyx_memoryview_obj *) Py_None; + return result; + } + retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, + (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 3, + &__Pyx_TypeInfo_float, stack, + &result, obj); + if (unlikely(retcode == -1)) + goto __pyx_fail; + return result; +__pyx_fail: + result.memview = NULL; + result.data = NULL; + return result; +} + +/* ObjectToMemviewSlice */ + static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *obj, int writable_flag) { + __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_BufFmt_StackElem stack[1]; + int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; + int retcode; + if (obj == Py_None) { + result.memview = (struct __pyx_memoryview_obj *) Py_None; + return result; + } + retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, + (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, + &__Pyx_TypeInfo_int, stack, + &result, obj); + if (unlikely(retcode == -1)) + goto __pyx_fail; + return result; +__pyx_fail: + result.memview = NULL; + result.data = NULL; + return result; +} + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { + const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(int) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(int) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if 
(sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(int) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(int), + little, !is_unsigned); + } +} + +/* CIntFromPyVerify */ + #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) +#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) +#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ + {\ + func_type value = func_value;\ + if (sizeof(target_type) < sizeof(func_type)) {\ + if (unlikely(value != (func_type) (target_type) value)) {\ + func_type zero = 0;\ + if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ + return (target_type) -1;\ + if (is_unsigned && unlikely(value < zero))\ + goto raise_neg_overflow;\ + else\ + goto raise_overflow;\ + }\ + }\ + return (target_type) value;\ + } + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { + const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(long) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(long) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(long) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(long), + little, !is_unsigned); + } +} + +/* MemviewSliceCopyTemplate */ + static __Pyx_memviewslice +__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, + const char *mode, int ndim, + size_t sizeof_dtype, int contig_flag, + int dtype_is_object) +{ + __Pyx_RefNannyDeclarations + int i; + __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; + struct __pyx_memoryview_obj *from_memview = from_mvs->memview; + Py_buffer *buf = &from_memview->view; + PyObject *shape_tuple = NULL; + PyObject *temp_int = NULL; + struct __pyx_array_obj *array_obj = NULL; + struct __pyx_memoryview_obj *memview_obj = NULL; + __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); + for (i = 0; i < ndim; i++) { + if (unlikely(from_mvs->suboffsets[i] >= 0)) { + PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " + "indirect dimensions (axis %d)", i); + goto fail; + } + } + shape_tuple = PyTuple_New(ndim); + if (unlikely(!shape_tuple)) { + goto fail; + } + __Pyx_GOTREF(shape_tuple); + for(i = 0; i < ndim; i++) { + temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); + if(unlikely(!temp_int)) { + goto fail; + } else { + PyTuple_SET_ITEM(shape_tuple, i, temp_int); + temp_int = NULL; + } + } + 
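+    /* Allocate a contiguous array with the same shape, wrap it in a fresh
+       memoryview, then copy the source slice's contents into it. */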
array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); + if (unlikely(!array_obj)) { + goto fail; + } + __Pyx_GOTREF(array_obj); + memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( + (PyObject *) array_obj, contig_flag, + dtype_is_object, + from_mvs->memview->typeinfo); + if (unlikely(!memview_obj)) + goto fail; + if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) + goto fail; + if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, + dtype_is_object) < 0)) + goto fail; + goto no_fail; +fail: + __Pyx_XDECREF(new_mvs.memview); + new_mvs.memview = NULL; + new_mvs.data = NULL; +no_fail: + __Pyx_XDECREF(shape_tuple); + __Pyx_XDECREF(temp_int); + __Pyx_XDECREF(array_obj); + __Pyx_RefNannyFinishContext(); + return new_mvs; +} + +/* CIntFromPy */ + static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { + const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(int) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (int) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { + return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { + return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { + return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (int) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(int) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + 
} else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) + case -2: + if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + } +#endif + if (sizeof(int) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + 
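+            /* Big-number fallback: serialize the PyLong into the bytes of a
+               native int via _PyLong_AsByteArray, honoring platform endianness. */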
int val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (int) -1; + } + } else { + int val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (int) -1; + val = __Pyx_PyInt_As_int(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to int"); + return (int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to int"); + return (int) -1; +} + +/* CIntFromPy */ + static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { + const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(long) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (long) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { + return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { + return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { + return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (long) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(long) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= 
sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) + case -2: + if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + } +#endif + if (sizeof(long) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { 
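+        /* Same big-number fallback as in __Pyx_PyInt_As_int above; Cython
+           emits one copy of this converter per requested C integer type. */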
+#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + long val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (long) -1; + } + } else { + long val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (long) -1; + val = __Pyx_PyInt_As_long(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to long"); + return (long) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to long"); + return (long) -1; +} + +/* CIntFromPy */ + static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { + const char neg_one = (char) ((char) 0 - (char) 1), const_zero = (char) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(char) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (char) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (char) 0; + case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) + case 2: + if (8 * sizeof(char) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { + return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(char) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { + return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(char) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { + return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (char) -1; + if (unlikely(result == 1)) + goto 
raise_neg_overflow; + } +#endif + if (sizeof(char) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (char) 0; + case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) + case -2: + if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { + return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(char) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { + return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { + return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(char) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { + return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { + return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(char) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { + return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + } +#endif + if (sizeof(char) <= sizeof(long)) { + 
__PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + char val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (char) -1; + } + } else { + char val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (char) -1; + val = __Pyx_PyInt_As_char(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to char"); + return (char) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to char"); + return (char) -1; +} + +/* CheckBinaryVersion */ + static int __Pyx_check_binary_version(void) { + char ctversion[4], rtversion[4]; + PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); + PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); + if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { + char message[200]; + PyOS_snprintf(message, sizeof(message), + "compiletime version %s of module '%.100s' " + "does not match runtime version %s", + ctversion, __Pyx_MODULE_NAME, rtversion); + return PyErr_WarnEx(NULL, message, 1); + } + return 0; +} + +/* InitStrings */ + static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { + while (t->p) { + #if PY_MAJOR_VERSION < 3 + if (t->is_unicode) { + *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); + } else if (t->intern) { + *t->p = PyString_InternFromString(t->s); + } else { + *t->p = PyString_FromStringAndSize(t->s, t->n - 1); + } + #else + if (t->is_unicode | t->is_str) { + if (t->intern) { + *t->p = PyUnicode_InternFromString(t->s); + } else if (t->encoding) { + *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); + } else { + *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); + } + } else { + *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); + } + #endif + if (!*t->p) + return -1; + if (PyObject_Hash(*t->p) == -1) + return -1; + ++t; + } + return 0; +} + +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { + return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); +} +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { + Py_ssize_t ignore; + return __Pyx_PyObject_AsStringAndSize(o, &ignore); +} +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +#if !CYTHON_PEP393_ENABLED +static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + char* defenc_c; + PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); + if (!defenc) return NULL; + defenc_c = PyBytes_AS_STRING(defenc); +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + { + char* end = defenc_c + PyBytes_GET_SIZE(defenc); + char* c; + for (c = defenc_c; c < end; c++) { + if ((unsigned char) (*c) >= 128) { + 
PyUnicode_AsASCIIString(o); + return NULL; + } + } + } +#endif + *length = PyBytes_GET_SIZE(defenc); + return defenc_c; +} +#else +static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + if (likely(PyUnicode_IS_ASCII(o))) { + *length = PyUnicode_GET_LENGTH(o); + return PyUnicode_AsUTF8(o); + } else { + PyUnicode_AsASCIIString(o); + return NULL; + } +#else + return PyUnicode_AsUTF8AndSize(o, length); +#endif +} +#endif +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT + if ( +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + __Pyx_sys_getdefaultencoding_not_ascii && +#endif + PyUnicode_Check(o)) { + return __Pyx_PyUnicode_AsStringAndSize(o, length); + } else +#endif +#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) + if (PyByteArray_Check(o)) { + *length = PyByteArray_GET_SIZE(o); + return PyByteArray_AS_STRING(o); + } else +#endif + { + char* result; + int r = PyBytes_AsStringAndSize(o, &result, length); + if (unlikely(r < 0)) { + return NULL; + } else { + return result; + } + } +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { + int is_true = x == Py_True; + if (is_true | (x == Py_False) | (x == Py_None)) return is_true; + else return PyObject_IsTrue(x); +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { + int retval; + if (unlikely(!x)) return -1; + retval = __Pyx_PyObject_IsTrue(x); + Py_DECREF(x); + return retval; +} +static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { +#if PY_MAJOR_VERSION >= 3 + if (PyLong_Check(result)) { + if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, + "__int__ returned non-int (type %.200s). 
" + "The ability to return an instance of a strict subclass of int " + "is deprecated, and may be removed in a future version of Python.", + Py_TYPE(result)->tp_name)) { + Py_DECREF(result); + return NULL; + } + return result; + } +#endif + PyErr_Format(PyExc_TypeError, + "__%.4s__ returned non-%.4s (type %.200s)", + type_name, type_name, Py_TYPE(result)->tp_name); + Py_DECREF(result); + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { +#if CYTHON_USE_TYPE_SLOTS + PyNumberMethods *m; +#endif + const char *name = NULL; + PyObject *res = NULL; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x) || PyLong_Check(x))) +#else + if (likely(PyLong_Check(x))) +#endif + return __Pyx_NewRef(x); +#if CYTHON_USE_TYPE_SLOTS + m = Py_TYPE(x)->tp_as_number; + #if PY_MAJOR_VERSION < 3 + if (m && m->nb_int) { + name = "int"; + res = m->nb_int(x); + } + else if (m && m->nb_long) { + name = "long"; + res = m->nb_long(x); + } + #else + if (likely(m && m->nb_int)) { + name = "int"; + res = m->nb_int(x); + } + #endif +#else + if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { + res = PyNumber_Int(x); + } +#endif + if (likely(res)) { +#if PY_MAJOR_VERSION < 3 + if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { +#else + if (unlikely(!PyLong_CheckExact(res))) { +#endif + return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); + } + } + else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_TypeError, + "an integer is required"); + } + return res; +} +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { + Py_ssize_t ival; + PyObject *x; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(b))) { + if (sizeof(Py_ssize_t) >= sizeof(long)) + return PyInt_AS_LONG(b); + else + return PyInt_AsSsize_t(b); + } +#endif + if (likely(PyLong_CheckExact(b))) { + #if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)b)->ob_digit; + const Py_ssize_t size = Py_SIZE(b); + if (likely(__Pyx_sst_abs(size) <= 1)) { + ival = likely(size) ? digits[0] : 0; + if (size == -1) ival = -ival; + return ival; + } else { + switch (size) { + case 2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + } + } + #endif + return PyLong_AsSsize_t(b); + } + x = PyNumber_Index(b); + if (!x) return -1; + ival = PyInt_AsSsize_t(x); + Py_DECREF(x); + return ival; +} +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { + return b ? 
__Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); +} +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { + return PyInt_FromSize_t(ival); +} + + +#endif /* Py_PYTHON_H */ diff --git a/nemo/collections/tts/modules/monotonic_align/core.pyx b/nemo/collections/tts/modules/monotonic_align/core.pyx new file mode 100644 index 000000000000..bfaabd4d21c2 --- /dev/null +++ b/nemo/collections/tts/modules/monotonic_align/core.pyx @@ -0,0 +1,42 @@ +cimport cython +from cython.parallel import prange + + +@cython.boundscheck(False) +@cython.wraparound(False) +cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: + cdef int x + cdef int y + cdef float v_prev + cdef float v_cur + cdef float tmp + cdef int index = t_x - 1 + + for y in range(t_y): + for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): + if x == y: + v_cur = max_neg_val + else: + v_cur = value[y-1, x] + if x == 0: + if y == 0: + v_prev = 0. + else: + v_prev = max_neg_val + else: + v_prev = value[y-1, x-1] + value[y, x] += max(v_prev, v_cur) + + for y in range(t_y - 1, -1, -1): + path[y, index] = 1 + if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): + index = index - 1 + + +@cython.boundscheck(False) +@cython.wraparound(False) +cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: + cdef int b = paths.shape[0] + cdef int i + for i in prange(b, nogil=True): + maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) diff --git a/nemo/collections/tts/modules/monotonic_align/setup.py b/nemo/collections/tts/modules/monotonic_align/setup.py new file mode 100644 index 000000000000..30c224807a70 --- /dev/null +++ b/nemo/collections/tts/modules/monotonic_align/setup.py @@ -0,0 +1,9 @@ +from distutils.core import setup +from Cython.Build import cythonize +import numpy + +setup( + name = 'monotonic_align', + ext_modules = cythonize("core.pyx"), + include_dirs=[numpy.get_include()] +) diff --git a/nemo/collections/tts/modules/vits_mel_processing.py b/nemo/collections/tts/modules/vits_mel_processing.py new file mode 100644 index 000000000000..0d8bc0c15522 --- /dev/null +++ b/nemo/collections/tts/modules/vits_mel_processing.py @@ -0,0 +1,77 @@ +import torch +import torch.utils.data +from librosa.filters import mel as librosa_mel_fn + +MAX_WAV_VALUE = 32768.0 + + +def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): + """ + PARAMS + ------ + C: compression factor + """ + return torch.log(torch.clamp(x, min=clip_val) * C) + + +def dynamic_range_decompression_torch(x, C=1): + """ + PARAMS + ------ + C: compression factor used to compress + """ + return torch.exp(x) / C + + +def spectral_normalize_torch(magnitudes): + output = dynamic_range_compression_torch(magnitudes) + return output + + +def spectral_de_normalize_torch(magnitudes): + output = dynamic_range_decompression_torch(magnitudes) + return output + + +mel_basis = {} +hann_window = {} + + +def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): + if torch.min(y) < -1.: + print('min value is ', torch.min(y)) + if torch.max(y) > 1.: + print('max value is ', torch.max(y)) + + global hann_window + dtype_device = str(y.dtype) + '_' + str(y.device) + wnsize_dtype_device = str(win_size) + '_' + dtype_device + if wnsize_dtype_device not in hann_window: + hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) + + y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), 
int((n_fft-hop_size)/2)), mode='reflect')
+    y = y.squeeze(1)
+
+    spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device],
+                      center=center, pad_mode='reflect', normalized=False, onesided=True)
+
+    spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
+    return spec
+
+
+def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):
+    global mel_basis
+    dtype_device = str(spec.dtype) + '_' + str(spec.device)
+    fmax_dtype_device = str(fmax) + '_' + dtype_device
+    if fmax_dtype_device not in mel_basis:
+        mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
+        mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device)
+    spec = torch.matmul(mel_basis[fmax_dtype_device], spec)
+    spec = spectral_normalize_torch(spec)
+    return spec
+
+
+def audio_to_mel_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False):
+    spec = spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center)
+    melspec = spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax)
+    return melspec
\ No newline at end of file
diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py
new file mode 100644
index 000000000000..9545024fc195
--- /dev/null
+++ b/nemo/collections/tts/modules/vits_modules.py
@@ -0,0 +1,1602 @@
+import numpy as np
+import math
+
+import torch
+from torch import nn
+from torch.nn import Conv1d, ConvTranspose1d, Conv2d
+from torch.nn import functional as F
+from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
+
+from nemo.collections.tts.modules.monotonic_align import maximum_path
+from nemo.collections.tts.modules.vits_mel_processing import librosa_mel_fn, spectral_normalize_torch
+
+
+LRELU_SLOPE = 0.1
+
+
+class LayerNorm(nn.Module):
+    def __init__(self, channels, eps=1e-5):
+        super().__init__()
+        self.channels = channels
+        self.eps = eps
+
+        self.gamma = nn.Parameter(torch.ones(channels))
+        self.beta = nn.Parameter(torch.zeros(channels))
+
+    def forward(self, x):
+        x = x.transpose(1, -1)
+        x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
+        return x.transpose(1, -1)
+
+
+class ConvReluNorm(nn.Module):
+    def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
+        super().__init__()
+        self.in_channels = in_channels
+        self.hidden_channels = hidden_channels
+        self.out_channels = out_channels
+        self.kernel_size = kernel_size
+        self.n_layers = n_layers
+        self.p_dropout = p_dropout
+        assert n_layers > 1, "Number of layers should be larger than 1."
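+        # n_layers must exceed 1: the first conv maps in_channels to hidden_channels,
+        # and the loop below appends the remaining n_layers - 1 hidden-to-hidden blocks.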
+
+        self.conv_layers = nn.ModuleList()
+        self.norm_layers = nn.ModuleList()
+        self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
+        self.norm_layers.append(LayerNorm(hidden_channels))
+        self.relu_drop = nn.Sequential(
+            nn.ReLU(),
+            nn.Dropout(p_dropout))
+        for _ in range(n_layers - 1):
+            self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
+            self.norm_layers.append(LayerNorm(hidden_channels))
+        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
+        self.proj.weight.data.zero_()
+        self.proj.bias.data.zero_()
+
+    def forward(self, x, x_mask):
+        x_org = x
+        for i in range(self.n_layers):
+            x = self.conv_layers[i](x * x_mask)
+            x = self.norm_layers[i](x)
+            x = self.relu_drop(x)
+        x = x_org + self.proj(x)
+        return x * x_mask
+
+
+class DDSConv(nn.Module):
+    """
+    Dilated and Depth-Separable Convolution
+    """
+
+    def __init__(self, channels, kernel_size, n_layers, p_dropout=0.):
+        super().__init__()
+        self.channels = channels
+        self.kernel_size = kernel_size
+        self.n_layers = n_layers
+        self.p_dropout = p_dropout
+
+        self.drop = nn.Dropout(p_dropout)
+        self.convs_sep = nn.ModuleList()
+        self.convs_1x1 = nn.ModuleList()
+        self.norms_1 = nn.ModuleList()
+        self.norms_2 = nn.ModuleList()
+        for i in range(n_layers):
+            dilation = kernel_size ** i
+            padding = (kernel_size * dilation - dilation) // 2
+            self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size,
+                                            groups=channels, dilation=dilation, padding=padding
+                                            ))
+            self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
+            self.norms_1.append(LayerNorm(channels))
+            self.norms_2.append(LayerNorm(channels))
+
+    def forward(self, x, x_mask, g=None):
+        if g is not None:
+            x = x + g
+        for i in range(self.n_layers):
+            y = self.convs_sep[i](x * x_mask)
+            y = self.norms_1[i](y)
+            y = F.gelu(y)
+            y = self.convs_1x1[i](y)
+            y = self.norms_2[i](y)
+            y = F.gelu(y)
+            y = self.drop(y)
+            x = x + y
+        return x * x_mask
+
+
+class WN(torch.nn.Module):
+    def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
+        super(WN, self).__init__()
+        assert (kernel_size % 2 == 1)
+        self.hidden_channels = hidden_channels
+        self.kernel_size = kernel_size
+        self.dilation_rate = dilation_rate
+        self.n_layers = n_layers
+        self.gin_channels = gin_channels
+        self.p_dropout = p_dropout
+
+        self.in_layers = torch.nn.ModuleList()
+        self.res_skip_layers = torch.nn.ModuleList()
+        self.drop = nn.Dropout(p_dropout)
+
+        if gin_channels != 0:
+            cond_layer = torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1)
+            self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
+
+        for i in range(n_layers):
+            dilation = dilation_rate ** i
+            padding = int((kernel_size * dilation - dilation) / 2)
+            in_layer = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, kernel_size,
+                                       dilation=dilation, padding=padding)
+            in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
+            self.in_layers.append(in_layer)
+
+            # last one is not necessary
+            if i < n_layers - 1:
+                res_skip_channels = 2 * hidden_channels
+            else:
+                res_skip_channels = hidden_channels
+
+            res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
+            res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
+            self.res_skip_layers.append(res_skip_layer)
+
+    def forward(self, x, x_mask, g=None, **kwargs):
+        output = torch.zeros_like(x)
+        n_channels_tensor = torch.IntTensor([self.hidden_channels])
+
+        if g is not None:
+            g =
self.cond_layer(g) + + for i in range(self.n_layers): + x_in = self.in_layers[i](x) + if g is not None: + cond_offset = i * 2 * self.hidden_channels + g_l = g[:, cond_offset:cond_offset + 2 * self.hidden_channels, :] + else: + g_l = torch.zeros_like(x_in) + + acts = fused_add_tanh_sigmoid_multiply( + x_in, + g_l, + n_channels_tensor) + acts = self.drop(acts) + + res_skip_acts = self.res_skip_layers[i](acts) + if i < self.n_layers - 1: + res_acts = res_skip_acts[:, :self.hidden_channels, :] + x = (x + res_acts) * x_mask + output = output + res_skip_acts[:, self.hidden_channels:, :] + else: + output = output + res_skip_acts + return output * x_mask + + def remove_weight_norm(self): + if self.gin_channels != 0: + torch.nn.utils.remove_weight_norm(self.cond_layer) + for l in self.in_layers: + torch.nn.utils.remove_weight_norm(l) + for l in self.res_skip_layers: + torch.nn.utils.remove_weight_norm(l) + + +class ResBlock1(torch.nn.Module): + def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): + super(ResBlock1, self).__init__() + self.convs1 = nn.ModuleList([ + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], + padding=get_padding(kernel_size, dilation[2]))) + ]) + self.convs1.apply(init_weights) + + self.convs2 = nn.ModuleList([ + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, + padding=get_padding(kernel_size, 1))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, + padding=get_padding(kernel_size, 1))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, + padding=get_padding(kernel_size, 1))) + ]) + self.convs2.apply(init_weights) + + def forward(self, x, x_mask=None): + for c1, c2 in zip(self.convs1, self.convs2): + xt = F.leaky_relu(x, LRELU_SLOPE) + if x_mask is not None: + xt = xt * x_mask + xt = c1(xt) + xt = F.leaky_relu(xt, LRELU_SLOPE) + if x_mask is not None: + xt = xt * x_mask + xt = c2(xt) + x = xt + x + if x_mask is not None: + x = x * x_mask + return x + + def remove_weight_norm(self): + for l in self.convs1: + remove_weight_norm(l) + for l in self.convs2: + remove_weight_norm(l) + + +class ResBlock2(torch.nn.Module): + def __init__(self, channels, kernel_size=3, dilation=(1, 3)): + super(ResBlock2, self).__init__() + self.convs = nn.ModuleList([ + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]))) + ]) + self.convs.apply(init_weights) + + def forward(self, x, x_mask=None): + for c in self.convs: + xt = F.leaky_relu(x, LRELU_SLOPE) + if x_mask is not None: + xt = xt * x_mask + xt = c(xt) + x = xt + x + if x_mask is not None: + x = x * x_mask + return x + + def remove_weight_norm(self): + for l in self.convs: + remove_weight_norm(l) + + +class Log(nn.Module): + def forward(self, x, x_mask, reverse=False, **kwargs): + if not reverse: + y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask + logdet = torch.sum(-y, [1, 2]) + return y, logdet + else: + x = torch.exp(x) * x_mask + return x + + +class Flip(nn.Module): + def forward(self, x, *args, reverse=False, **kwargs): + x = torch.flip(x, [1]) + if not reverse: + logdet = 
torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) + return x, logdet + else: + return x + + +class ElementwiseAffine(nn.Module): + def __init__(self, channels): + super().__init__() + self.channels = channels + self.m = nn.Parameter(torch.zeros(channels, 1)) + self.logs = nn.Parameter(torch.zeros(channels, 1)) + + def forward(self, x, x_mask, reverse=False, **kwargs): + if not reverse: + y = self.m + torch.exp(self.logs) * x + y = y * x_mask + logdet = torch.sum(self.logs * x_mask, [1, 2]) + return y, logdet + else: + x = (x - self.m) * torch.exp(-self.logs) * x_mask + return x + + +class ResidualCouplingLayer(nn.Module): + def __init__(self, + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + p_dropout=0, + gin_channels=0, + mean_only=False): + assert channels % 2 == 0, "channels should be divisible by 2" + super().__init__() + self.channels = channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.half_channels = channels // 2 + self.mean_only = mean_only + + self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) + self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, + gin_channels=gin_channels) + self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) + self.post.weight.data.zero_() + self.post.bias.data.zero_() + + def forward(self, x, x_mask, g=None, reverse=False): + x0, x1 = torch.split(x, [self.half_channels] * 2, 1) + h = self.pre(x0) * x_mask + h = self.enc(h, x_mask, g=g) + stats = self.post(h) * x_mask + if not self.mean_only: + m, logs = torch.split(stats, [self.half_channels] * 2, 1) + else: + m = stats + logs = torch.zeros_like(m) + + if not reverse: + x1 = m + x1 * torch.exp(logs) * x_mask + x = torch.cat([x0, x1], 1) + logdet = torch.sum(logs, [1, 2]) + return x, logdet + else: + x1 = (x1 - m) * torch.exp(-logs) * x_mask + x = torch.cat([x0, x1], 1) + return x + + +class ConvFlow(nn.Module): + def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): + super().__init__() + self.in_channels = in_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.n_layers = n_layers + self.num_bins = num_bins + self.tail_bound = tail_bound + self.half_channels = in_channels // 2 + + self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) + self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) + self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) + self.proj.weight.data.zero_() + self.proj.bias.data.zero_() + + def forward(self, x, x_mask, g=None, reverse=False): + x0, x1 = torch.split(x, [self.half_channels] * 2, 1) + h = self.pre(x0) + h = self.convs(h, x_mask, g=g) + h = self.proj(h) * x_mask + + b, c, t = x0.shape + h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] 
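+        # h packs 3 * num_bins - 1 spline parameters per half-channel:
+        # num_bins bin widths, num_bins bin heights, and num_bins - 1 interior
+        # knot derivatives; the slices below unpack them for the
+        # rational-quadratic spline defined at the end of this file.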
+ + unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) + unnormalized_heights = h[..., self.num_bins:2 * self.num_bins] / math.sqrt(self.filter_channels) + unnormalized_derivatives = h[..., 2 * self.num_bins:] + + x1, logabsdet = piecewise_rational_quadratic_transform(x1, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=reverse, + tails='linear', + tail_bound=self.tail_bound + ) + + x = torch.cat([x0, x1], 1) * x_mask + logdet = torch.sum(logabsdet * x_mask, [1, 2]) + if not reverse: + return x, logdet + else: + return x + + +class StochasticDurationPredictor(nn.Module): + def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): + super().__init__() + filter_channels = in_channels # it needs to be removed from future version. + self.in_channels = in_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.n_flows = n_flows + self.gin_channels = gin_channels + + self.log_flow = Log() + self.flows = nn.ModuleList() + self.flows.append(ElementwiseAffine(2)) + for i in range(n_flows): + self.flows.append(ConvFlow(2, filter_channels, kernel_size, n_layers=3)) + self.flows.append(Flip()) + + self.post_pre = nn.Conv1d(1, filter_channels, 1) + self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) + self.post_convs = DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) + self.post_flows = nn.ModuleList() + self.post_flows.append(ElementwiseAffine(2)) + for i in range(4): + self.post_flows.append(ConvFlow(2, filter_channels, kernel_size, n_layers=3)) + self.post_flows.append(Flip()) + + self.pre = nn.Conv1d(in_channels, filter_channels, 1) + self.proj = nn.Conv1d(filter_channels, filter_channels, 1) + self.convs = DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, filter_channels, 1) + + def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): + x = torch.detach(x) + x = self.pre(x) + if g is not None: + g = torch.detach(g) + x = x + self.cond(g) + x = self.convs(x, x_mask) + x = self.proj(x) * x_mask + + if not reverse: + flows = self.flows + assert w is not None + + logdet_tot_q = 0 + h_w = self.post_pre(w) + h_w = self.post_convs(h_w, x_mask) + h_w = self.post_proj(h_w) * x_mask + e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask + z_q = e_q + for flow in self.post_flows: + z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) + logdet_tot_q += logdet_q + z_u, z1 = torch.split(z_q, [1, 1], 1) + u = torch.sigmoid(z_u) * x_mask + z0 = (w - u) * x_mask + logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2]) + logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q + + logdet_tot = 0 + z0, logdet = self.log_flow(z0, x_mask) + logdet_tot += logdet + z = torch.cat([z0, z1], 1) + for flow in flows: + z, logdet = flow(z, x_mask, g=x, reverse=reverse) + logdet_tot = logdet_tot + logdet + nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot + return nll + logq # [b] + else: + flows = list(reversed(self.flows)) + flows = flows[:-2] + [flows[-1]] # remove a useless vflow + z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale + for flow in flows: + z = flow(z, x_mask, g=x, reverse=reverse) + z0, z1 = torch.split(z, [1, 1], 1) + logw = z0 + return logw + + 
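+# Usage sketch for the duration predictors (illustrative only; the channel
+# sizes below are assumptions, not values fixed by this file):
+#
+#     dp = StochasticDurationPredictor(192, 192, 3, 0.5, n_flows=4)
+#     nll = dp(x, x_mask, w=durations)                       # training: per-utterance NLL
+#     logw = dp(x, x_mask, reverse=True, noise_scale=0.8)    # inference: sampled log-durations
+#
+# The deterministic DurationPredictor below is the drop-in alternative used
+# when use_sdp=False in SynthesizerTrn; it regresses log-durations directly.
+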
+class DurationPredictor(nn.Module): + def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): + super().__init__() + + self.in_channels = in_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.gin_channels = gin_channels + + self.drop = nn.Dropout(p_dropout) + self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) + self.norm_1 = LayerNorm(filter_channels) + self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) + self.norm_2 = LayerNorm(filter_channels) + self.proj = nn.Conv1d(filter_channels, 1, 1) + + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, in_channels, 1) + + def forward(self, x, x_mask, g=None): + x = torch.detach(x) + if g is not None: + g = torch.detach(g) + x = x + self.cond(g) + x = self.conv_1(x * x_mask) + x = torch.relu(x) + x = self.norm_1(x) + x = self.drop(x) + x = self.conv_2(x * x_mask) + x = torch.relu(x) + x = self.norm_2(x) + x = self.drop(x) + x = self.proj(x * x_mask) + return x * x_mask + + +class TextEncoder(nn.Module): + def __init__(self, + n_vocab, + out_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout): + super().__init__() + self.n_vocab = n_vocab + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + + self.emb = nn.Embedding(n_vocab, hidden_channels) + nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) + + self.encoder = Encoder( + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout) + self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, x, x_lengths): + x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] + x = torch.transpose(x, 1, -1) # [b, h, t] + x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) + + x = self.encoder(x * x_mask, x_mask) + stats = self.proj(x) * x_mask + + m, logs = torch.split(stats, self.out_channels, dim=1) + return x, m, logs, x_mask + + +class ResidualCouplingBlock(nn.Module): + def __init__(self, + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + n_flows=4, + gin_channels=0): + super().__init__() + self.channels = channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.n_flows = n_flows + self.gin_channels = gin_channels + + self.flows = nn.ModuleList() + for i in range(n_flows): + self.flows.append(ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) + self.flows.append(Flip()) + + def forward(self, x, x_mask, g=None, reverse=False): + if not reverse: + for flow in self.flows: + x, _ = flow(x, x_mask, g=g, reverse=reverse) + else: + for flow in reversed(self.flows): + x = flow(x, x_mask, g=g, reverse=reverse) + return x + + +class PosteriorEncoder(nn.Module): + def __init__(self, + in_channels, + out_channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=0): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = 
n_layers + self.gin_channels = gin_channels + + self.pre = nn.Conv1d(in_channels, hidden_channels, 1) + self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, x, x_lengths, g=None): + x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype).to(device=x.device) + x = self.pre(x) * x_mask + x = self.enc(x, x_mask, g=g) + stats = self.proj(x) * x_mask + m, logs = torch.split(stats, self.out_channels, dim=1) + z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask + return z, m, logs, x_mask + + +class Generator(torch.nn.Module): + def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): + super(Generator, self).__init__() + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) + resblock = ResBlock1 if resblock == '1' else ResBlock2 + + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + self.ups.append(weight_norm( + ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), + k, u, padding=(k-u)//2))) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = upsample_initial_channel//(2**(i+1)) + for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): + self.resblocks.append(resblock(ch, k, d)) + + self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) + self.ups.apply(init_weights) + + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) + + def forward(self, x, g=None): + x = self.conv_pre(x) + if g is not None: + x = x + self.cond(g) + + for i in range(self.num_upsamples): + x = F.leaky_relu(x, LRELU_SLOPE) + x = self.ups[i](x) + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i*self.num_kernels+j](x) + else: + xs += self.resblocks[i*self.num_kernels+j](x) + x = xs / self.num_kernels + x = F.leaky_relu(x) + x = self.conv_post(x) + x = torch.tanh(x) + + return x + + def remove_weight_norm(self): + print('Removing weight norm...') + for l in self.ups: + remove_weight_norm(l) + for l in self.resblocks: + l.remove_weight_norm() + + +class DiscriminatorP(torch.nn.Module): + def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): + super(DiscriminatorP, self).__init__() + self.period = period + self.use_spectral_norm = use_spectral_norm + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList([ + norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), + ]) + self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) + + def forward(self, x): + fmap = [] + + # 1d to 2d + b, c, t = x.shape + if t % self.period != 0: # pad first + n_pad = self.period - (t % self.period) + x = F.pad(x, (0, n_pad), "reflect") + t = t + 
n_pad + x = x.view(b, c, t // self.period, self.period) + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class DiscriminatorS(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(DiscriminatorS, self).__init__() + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList([ + norm_f(Conv1d(1, 16, 15, 1, padding=7)), + norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), + norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), + norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), + norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), + norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), + ]) + self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) + + def forward(self, x): + fmap = [] + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class MultiPeriodDiscriminator(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(MultiPeriodDiscriminator, self).__init__() + periods = [2,3,5,7,11] + + discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] + discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] + self.discriminators = nn.ModuleList(discs) + + def forward(self, y, y_hat): + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + y_d_rs.append(y_d_r) + y_d_gs.append(y_d_g) + fmap_rs.append(fmap_r) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +class SynthesizerTrn(nn.Module): + """ + Synthesizer for Training + """ + + def __init__(self, + n_vocab, + spec_channels, + segment_size, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + n_speakers=0, + gin_channels=0, + use_sdp=True, + **kwargs): + + super().__init__() + self.n_vocab = n_vocab + self.spec_channels = spec_channels + self.inter_channels = inter_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.resblock = resblock + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.upsample_rates = upsample_rates + self.upsample_initial_channel = upsample_initial_channel + self.upsample_kernel_sizes = upsample_kernel_sizes + self.segment_size = segment_size + self.n_speakers = n_speakers + self.gin_channels = gin_channels + + self.use_sdp = use_sdp + + self.enc_p = TextEncoder(n_vocab, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout) + self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) + self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) + self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) + + if use_sdp: + self.dp = 
StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) + else: + self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) + + if n_speakers > 1: + self.emb_g = nn.Embedding(n_speakers, gin_channels) + + def forward(self, x, x_lengths, y, y_lengths, sid=None): + + x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) + if self.n_speakers > 0: + g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] + else: + g = None + + z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) + z_p = self.flow(z, y_mask, g=g) + + with torch.no_grad(): + # negative cross-entropy + s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] + neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] + neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] + neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] + neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] + neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 + + attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) + attn = maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() + + w = attn.sum(2) + if self.use_sdp: + l_length = self.dp(x, x_mask, w, g=g) + l_length = l_length / torch.sum(x_mask) + else: + logw_ = torch.log(w + 1e-6) * x_mask + logw = self.dp(x, x_mask, g=g) + l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging + + # expand prior + m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) + logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) + + z_slice, ids_slice = rand_slice_segments(z, y_lengths, self.segment_size) + o = self.dec(z_slice, g=g) + return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) + + def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): + x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) + if self.n_speakers > 0: + g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] + else: + g = None + + if self.use_sdp: + logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) + else: + logw = self.dp(x, x_mask, g=g) + w = torch.exp(logw) * x_mask * length_scale + w_ceil = torch.ceil(w) + y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() + y_mask = torch.unsqueeze(sequence_mask(y_lengths, None), 1).to(x_mask.dtype) + attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) + attn = generate_path(w_ceil, attn_mask) + + m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] + logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] + + z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale + z = self.flow(z_p, y_mask, g=g, reverse=True) + o = self.dec((z * y_mask)[:,:,:max_len], g=g) + return o, attn, y_mask, (z, z_p, m_p, logs_p) + + def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): + assert self.n_speakers > 0, "n_speakers have to be larger than 0." 
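+        # Voice conversion: encode the audio with the source speaker embedding,
+        # map the posterior z to the speaker-independent prior via the flow,
+        # then invert the flow under the target speaker embedding and vocode
+        # with the target condition.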
+ g_src = self.emb_g(sid_src).unsqueeze(-1) + g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) + z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) + z_p = self.flow(z, y_mask, g=g_src) + z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) + o_hat = self.dec(z_hat * y_mask, g=g_tgt) + return o_hat, y_mask, (z, z_p, z_hat) + +################## +# Mel_processing # +################## + +mel_basis = {} +hann_window = {} + +def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): + global mel_basis + dtype_device = str(spec.dtype) + '_' + str(spec.device) + fmax_dtype_device = str(fmax) + '_' + dtype_device + if fmax_dtype_device not in mel_basis: + mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) + mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) + spec = torch.matmul(mel_basis[fmax_dtype_device], spec) + spec = spectral_normalize_torch(spec) + return spec + + +def audio_to_mel_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): + if torch.min(y) < -1.: + print('min value is ', torch.min(y)) + if torch.max(y) > 1.: + print('max value is ', torch.max(y)) + + global mel_basis, hann_window + dtype_device = str(y.dtype) + '_' + str(y.device) + fmax_dtype_device = str(fmax) + '_' + dtype_device + wnsize_dtype_device = str(win_size) + '_' + dtype_device + if fmax_dtype_device not in mel_basis: + mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) + mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) + if wnsize_dtype_device not in hann_window: + hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) + + y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') + y = y.squeeze(1) + + spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], + center=center, pad_mode='reflect', normalized=False, onesided=True) + + spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) + + spec = torch.matmul(mel_basis[fmax_dtype_device], spec) + spec = spectral_normalize_torch(spec) + + return spec + + +########### +# Commons # +########### + +def init_weights(m, mean=0.0, std=0.01): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + m.weight.data.normal_(mean, std) + + +def get_padding(kernel_size, dilation=1): + return int((kernel_size*dilation - dilation)/2) + + +def convert_pad_shape(pad_shape): + l = pad_shape[::-1] + pad_shape = [item for sublist in l for item in sublist] + return pad_shape + + +def intersperse(lst, item): + result = [item] * (len(lst) * 2 + 1) + result[1::2] = lst + return result + + +def kl_divergence(m_p, logs_p, m_q, logs_q): + """KL(P||Q)""" + kl = (logs_q - logs_p) - 0.5 + kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. 
* logs_q) + return kl + + +def rand_gumbel(shape): + """Sample from the Gumbel distribution, protect from overflows.""" + uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 + return -torch.log(-torch.log(uniform_samples)) + + +def rand_gumbel_like(x): + g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) + return g + + +def slice_segments(x, ids_str, segment_size=4): + ret = torch.zeros_like(x[:, :, :segment_size]) + for i in range(x.size(0)): + idx_str = ids_str[i] + idx_end = idx_str + segment_size + ret[i] = x[i, :, idx_str:idx_end] + return ret + + +def rand_slice_segments(x, x_lengths=None, segment_size=4): + b, d, t = x.size() + if x_lengths is None: + x_lengths = t + ids_str_max = x_lengths - segment_size + 1 + ids_str_max = ids_str_max.to(device=x.device) + ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) + ret = slice_segments(x, ids_str, segment_size) + return ret, ids_str + + +def get_timing_signal_1d( + length, channels, min_timescale=1.0, max_timescale=1.0e4): + position = torch.arange(length, dtype=torch.float) + num_timescales = channels // 2 + log_timescale_increment = ( + math.log(float(max_timescale) / float(min_timescale)) / + (num_timescales - 1)) + inv_timescales = min_timescale * torch.exp( + torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) + scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) + signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) + signal = F.pad(signal, [0, 0, 0, channels % 2]) + signal = signal.view(1, channels, length) + return signal + + +def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): + b, channels, length = x.size() + signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) + return x + signal.to(dtype=x.dtype, device=x.device) + + +def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): + b, channels, length = x.size() + signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) + return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) + + +def subsequent_mask(length): + mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) + return mask + + +@torch.jit.script +def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): + n_channels_int = n_channels[0] + in_act = input_a + input_b + t_act = torch.tanh(in_act[:, :n_channels_int, :]) + s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) + acts = t_act * s_act + return acts + + +def convert_pad_shape(pad_shape): + l = pad_shape[::-1] + pad_shape = [item for sublist in l for item in sublist] + return pad_shape + + +def shift_1d(x): + x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] + return x + + +def sequence_mask(length, max_length=None): + if max_length is None: + max_length = length.max() + x = torch.arange(max_length, dtype=length.dtype, device=length.device) + return x.unsqueeze(0) < length.unsqueeze(1) + + +def generate_path(duration, mask): + """ + duration: [b, 1, t_x] + mask: [b, 1, t_y, t_x] + """ + b, _, t_y, t_x = mask.shape + cum_duration = torch.cumsum(duration, -1) + + cum_duration_flat = cum_duration.view(b * t_x) + path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) + path = path.view(b, t_x, t_y) + path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] + path = path.unsqueeze(1).transpose(2,3) * mask + return path + + +def clip_grad_value_(parameters, clip_value, norm_type=2): + if 
isinstance(parameters, torch.Tensor): + parameters = [parameters] + parameters = list(filter(lambda p: p.grad is not None, parameters)) + norm_type = float(norm_type) + if clip_value is not None: + clip_value = float(clip_value) + + total_norm = 0 + for p in parameters: + param_norm = p.grad.data.norm(norm_type) + total_norm += param_norm.item() ** norm_type + if clip_value is not None: + p.grad.data.clamp_(min=-clip_value, max=clip_value) + total_norm = total_norm ** (1. / norm_type) + return total_norm + + +############## +# Attentions # +############## +class Encoder(nn.Module): + def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): + super().__init__() + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.window_size = window_size + + self.drop = nn.Dropout(p_dropout) + self.attn_layers = nn.ModuleList() + self.norm_layers_1 = nn.ModuleList() + self.ffn_layers = nn.ModuleList() + self.norm_layers_2 = nn.ModuleList() + for i in range(self.n_layers): + self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) + self.norm_layers_1.append(LayerNorm(hidden_channels)) + self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) + self.norm_layers_2.append(LayerNorm(hidden_channels)) + + def forward(self, x, x_mask): + attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) + x = x * x_mask + for i in range(self.n_layers): + y = self.attn_layers[i](x, x, attn_mask) + y = self.drop(y) + x = self.norm_layers_1[i](x + y) + + y = self.ffn_layers[i](x, x_mask) + y = self.drop(y) + x = self.norm_layers_2[i](x + y) + x = x * x_mask + return x + + +class Decoder(nn.Module): + def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): + super().__init__() + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.proximal_bias = proximal_bias + self.proximal_init = proximal_init + + self.drop = nn.Dropout(p_dropout) + self.self_attn_layers = nn.ModuleList() + self.norm_layers_0 = nn.ModuleList() + self.encdec_attn_layers = nn.ModuleList() + self.norm_layers_1 = nn.ModuleList() + self.ffn_layers = nn.ModuleList() + self.norm_layers_2 = nn.ModuleList() + for i in range(self.n_layers): + self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) + self.norm_layers_0.append(LayerNorm(hidden_channels)) + self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) + self.norm_layers_1.append(LayerNorm(hidden_channels)) + self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) + self.norm_layers_2.append(LayerNorm(hidden_channels)) + + def forward(self, x, x_mask, h, h_mask): + """ + x: decoder input + h: encoder output + """ + self_attn_mask = torch.tril(torch.ones(x_mask.size(2), x_mask.size(2))).unsqueeze(0).unsqueeze(0).to(device=x.device, dtype=x.dtype) + encdec_attn_mask = h_mask.unsqueeze(2) 
* x_mask.unsqueeze(-1) + x = x * x_mask + for i in range(self.n_layers): + y = self.self_attn_layers[i](x, x, self_attn_mask) + y = self.drop(y) + x = self.norm_layers_0[i](x + y) + + y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) + y = self.drop(y) + x = self.norm_layers_1[i](x + y) + + y = self.ffn_layers[i](x, x_mask) + y = self.drop(y) + x = self.norm_layers_2[i](x + y) + x = x * x_mask + return x + + +class MultiHeadAttention(nn.Module): + def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): + super().__init__() + assert channels % n_heads == 0 + + self.channels = channels + self.out_channels = out_channels + self.n_heads = n_heads + self.p_dropout = p_dropout + self.window_size = window_size + self.heads_share = heads_share + self.block_length = block_length + self.proximal_bias = proximal_bias + self.proximal_init = proximal_init + self.attn = None + + self.k_channels = channels // n_heads + self.conv_q = nn.Conv1d(channels, channels, 1) + self.conv_k = nn.Conv1d(channels, channels, 1) + self.conv_v = nn.Conv1d(channels, channels, 1) + self.conv_o = nn.Conv1d(channels, out_channels, 1) + self.drop = nn.Dropout(p_dropout) + + if window_size is not None: + n_heads_rel = 1 if heads_share else n_heads + rel_stddev = self.k_channels**-0.5 + self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) + self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) + + nn.init.xavier_uniform_(self.conv_q.weight) + nn.init.xavier_uniform_(self.conv_k.weight) + nn.init.xavier_uniform_(self.conv_v.weight) + if proximal_init: + with torch.no_grad(): + self.conv_k.weight.copy_(self.conv_q.weight) + self.conv_k.bias.copy_(self.conv_q.bias) + + def forward(self, x, c, attn_mask=None): + q = self.conv_q(x) + k = self.conv_k(c) + v = self.conv_v(c) + + x, self.attn = self.attention(q, k, v, mask=attn_mask) + + x = self.conv_o(x) + return x + + def attention(self, query, key, value, mask=None): + # reshape [b, d, t] -> [b, n_h, t, d_k] + b, d, t_s, t_t = key.size(0), key.size(1), key.size(2), query.size(2) + query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) + key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) + value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) + + scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) + if self.window_size is not None: + assert t_s == t_t, "Relative attention is only available for self-attention." + key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) + rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) + scores_local = self._relative_position_to_absolute_position(rel_logits) + scores = scores + scores_local + if self.proximal_bias: + assert t_s == t_t, "Proximal bias is only available for self-attention." + scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) + if mask is not None: + scores = scores.masked_fill(mask == 0, -1e4) + if self.block_length is not None: + assert t_s == t_t, "Local attention is only available for self-attention." 
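+            # Banded (local) attention: triu/tril keep only scores within
+            # block_length positions of the diagonal; everything else is masked.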
+ block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) + scores = scores.masked_fill(block_mask == 0, -1e4) + p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] + p_attn = self.drop(p_attn) + output = torch.matmul(p_attn, value) + if self.window_size is not None: + relative_weights = self._absolute_position_to_relative_position(p_attn) + value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) + output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) + output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] + return output, p_attn + + def _matmul_with_relative_values(self, x, y): + """ + x: [b, h, l, m] + y: [h or 1, m, d] + ret: [b, h, l, d] + """ + ret = torch.matmul(x, y.unsqueeze(0)) + return ret + + def _matmul_with_relative_keys(self, x, y): + """ + x: [b, h, l, d] + y: [h or 1, m, d] + ret: [b, h, l, m] + """ + ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) + return ret + + def _get_relative_embeddings(self, relative_embeddings, length): + # Pad first before slice to avoid using cond ops. + pad_length = max(length - (self.window_size + 1), 0) + slice_start_position = max((self.window_size + 1) - length, 0) + slice_end_position = slice_start_position + 2 * length - 1 + if pad_length > 0: + padded_relative_embeddings = F.pad( + relative_embeddings, + convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) + else: + padded_relative_embeddings = relative_embeddings + used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] + return used_relative_embeddings + + def _relative_position_to_absolute_position(self, x): + """ + x: [b, h, l, 2*l-1] + ret: [b, h, l, l] + """ + batch, heads, length, _ = x.size() + # Concat columns of pad to shift from relative to absolute indexing. + x = F.pad(x, convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) + + # Concat extra elements so as to add up to shape (len+1, 2*len-1). + x_flat = x.view([batch, heads, length * 2 * length]) + x_flat = F.pad(x_flat, convert_pad_shape([[0,0],[0,0],[0,length-1]])) + + # Reshape and slice out the padded elements. + x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] + return x_final + + def _absolute_position_to_relative_position(self, x): + """ + x: [b, h, l, l] + ret: [b, h, l, 2*l-1] + """ + batch, heads, length, _ = x.size() + # pad along column + x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) + x_flat = x.view([batch, heads, length**2 + length*(length -1)]) + # add 0's in the beginning that will skew the elements after reshape + x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]])) + x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] + return x_final + + def _attention_bias_proximal(self, length): + """Bias for self-attention to encourage attention to close positions. + Args: + length: an integer scalar.
+ Returns: + a Tensor with shape [1, 1, length, length] + """ + r = torch.arange(length, dtype=torch.float32) + diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) + return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) + + +class FFN(nn.Module): + def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.activation = activation + self.causal = causal + + if causal: + self.padding = self._causal_padding + else: + self.padding = self._same_padding + + self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) + self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) + self.drop = nn.Dropout(p_dropout) + + def forward(self, x, x_mask): + x = self.conv_1(self.padding(x * x_mask)) + if self.activation == "gelu": + x = x * torch.sigmoid(1.702 * x) + else: + x = torch.relu(x) + x = self.drop(x) + x = self.conv_2(self.padding(x * x_mask)) + return x * x_mask + + def _causal_padding(self, x): + if self.kernel_size == 1: + return x + pad_l = self.kernel_size - 1 + pad_r = 0 + padding = [[0, 0], [0, 0], [pad_l, pad_r]] + x = F.pad(x, convert_pad_shape(padding)) + return x + + def _same_padding(self, x): + if self.kernel_size == 1: + return x + pad_l = (self.kernel_size - 1) // 2 + pad_r = self.kernel_size // 2 + padding = [[0, 0], [0, 0], [pad_l, pad_r]] + x = F.pad(x, convert_pad_shape(padding)) + return x + + +############## +# Transforms # +############## + +DEFAULT_MIN_BIN_WIDTH = 1e-3 +DEFAULT_MIN_BIN_HEIGHT = 1e-3 +DEFAULT_MIN_DERIVATIVE = 1e-3 + + +def piecewise_rational_quadratic_transform(inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + tails=None, + tail_bound=1., + min_bin_width=DEFAULT_MIN_BIN_WIDTH, + min_bin_height=DEFAULT_MIN_BIN_HEIGHT, + min_derivative=DEFAULT_MIN_DERIVATIVE): + + if tails is None: + spline_fn = rational_quadratic_spline + spline_kwargs = {} + else: + spline_fn = unconstrained_rational_quadratic_spline + spline_kwargs = { + 'tails': tails, + 'tail_bound': tail_bound + } + + outputs, logabsdet = spline_fn( + inputs=inputs, + unnormalized_widths=unnormalized_widths, + unnormalized_heights=unnormalized_heights, + unnormalized_derivatives=unnormalized_derivatives, + inverse=inverse, + min_bin_width=min_bin_width, + min_bin_height=min_bin_height, + min_derivative=min_derivative, + **spline_kwargs + ) + return outputs, logabsdet + + +def searchsorted(bin_locations, inputs, eps=1e-6): + bin_locations[..., -1] += eps + return torch.sum( + inputs[..., None] >= bin_locations, + dim=-1 + ) - 1 + + +def unconstrained_rational_quadratic_spline(inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + tails='linear', + tail_bound=1., + min_bin_width=DEFAULT_MIN_BIN_WIDTH, + min_bin_height=DEFAULT_MIN_BIN_HEIGHT, + min_derivative=DEFAULT_MIN_DERIVATIVE): + inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) + outside_interval_mask = ~inside_interval_mask + + outputs = torch.zeros_like(inputs) + logabsdet = torch.zeros_like(inputs) + + if tails == 'linear': + unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) + constant = np.log(np.exp(1 - min_derivative) - 1) + unnormalized_derivatives[..., 0] = constant + unnormalized_derivatives[..., -1] = 
constant + + outputs[outside_interval_mask] = inputs[outside_interval_mask] + logabsdet[outside_interval_mask] = 0 + else: + raise RuntimeError('{} tails are not implemented.'.format(tails)) + + outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( + inputs=inputs[inside_interval_mask], + unnormalized_widths=unnormalized_widths[inside_interval_mask, :], + unnormalized_heights=unnormalized_heights[inside_interval_mask, :], + unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], + inverse=inverse, + left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, + min_bin_width=min_bin_width, + min_bin_height=min_bin_height, + min_derivative=min_derivative + ) + + return outputs, logabsdet + +def rational_quadratic_spline(inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + left=0., right=1., bottom=0., top=1., + min_bin_width=DEFAULT_MIN_BIN_WIDTH, + min_bin_height=DEFAULT_MIN_BIN_HEIGHT, + min_derivative=DEFAULT_MIN_DERIVATIVE): + if torch.min(inputs) < left or torch.max(inputs) > right: + raise ValueError('Input to a transform is not within its domain') + + num_bins = unnormalized_widths.shape[-1] + + if min_bin_width * num_bins > 1.0: + raise ValueError('Minimal bin width too large for the number of bins') + if min_bin_height * num_bins > 1.0: + raise ValueError('Minimal bin height too large for the number of bins') + + widths = F.softmax(unnormalized_widths, dim=-1) + widths = min_bin_width + (1 - min_bin_width * num_bins) * widths + cumwidths = torch.cumsum(widths, dim=-1) + cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) + cumwidths = (right - left) * cumwidths + left + cumwidths[..., 0] = left + cumwidths[..., -1] = right + widths = cumwidths[..., 1:] - cumwidths[..., :-1] + + derivatives = min_derivative + F.softplus(unnormalized_derivatives) + + heights = F.softmax(unnormalized_heights, dim=-1) + heights = min_bin_height + (1 - min_bin_height * num_bins) * heights + cumheights = torch.cumsum(heights, dim=-1) + cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) + cumheights = (top - bottom) * cumheights + bottom + cumheights[..., 0] = bottom + cumheights[..., -1] = top + heights = cumheights[..., 1:] - cumheights[..., :-1] + + if inverse: + bin_idx = searchsorted(cumheights, inputs)[..., None] + else: + bin_idx = searchsorted(cumwidths, inputs)[..., None] + + input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] + input_bin_widths = widths.gather(-1, bin_idx)[..., 0] + + input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] + delta = heights / widths + input_delta = delta.gather(-1, bin_idx)[..., 0] + + input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] + input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] + + input_heights = heights.gather(-1, bin_idx)[..., 0] + + if inverse: + a = (((inputs - input_cumheights) * (input_derivatives + + input_derivatives_plus_one + - 2 * input_delta) + + input_heights * (input_delta - input_derivatives))) + b = (input_heights * input_derivatives + - (inputs - input_cumheights) * (input_derivatives + + input_derivatives_plus_one + - 2 * input_delta)) + c = - input_delta * (inputs - input_cumheights) + + discriminant = b.pow(2) - 4 * a * c + assert (discriminant >= 0).all() + + root = (2 * c) / (-b - torch.sqrt(discriminant)) + outputs = root * input_bin_widths + input_cumwidths + + theta_one_minus_theta = root * (1 - root) + denominator = input_delta + 
((input_derivatives + input_derivatives_plus_one - 2 * input_delta) + * theta_one_minus_theta) + derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) + + 2 * input_delta * theta_one_minus_theta + + input_derivatives * (1 - root).pow(2)) + logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) + + return outputs, -logabsdet + else: + theta = (inputs - input_cumwidths) / input_bin_widths + theta_one_minus_theta = theta * (1 - theta) + + numerator = input_heights * (input_delta * theta.pow(2) + + input_derivatives * theta_one_minus_theta) + denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) + * theta_one_minus_theta) + outputs = input_cumheights + numerator / denominator + + derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) + + 2 * input_delta * theta_one_minus_theta + + input_derivatives * (1 - theta).pow(2)) + logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) + + return outputs, logabsdet diff --git a/setup.py b/setup.py index 950addaa4092..57af56c1e594 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,10 @@ from itertools import chain import importlib.util +from distutils.core import setup +from Cython.Build import cythonize +import numpy + import setuptools @@ -105,6 +109,15 @@ def req_file(filename, folder="requirements"): tests_requirements = extras_require["test"] +############################################################################### +# Monotonic Align # +# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # + +setup( + name = 'monotonic_align', + ext_modules = cythonize("nemo/collections/tts/modules/monotonic_align/core.pyx"), + include_dirs=[numpy.get_include()] +) ############################################################################### # Code style checkers # From 012d88ff1e2d1782b029504cbd5334fe19b9bfaa Mon Sep 17 00:00:00 2001 From: Jason Date: Thu, 3 Feb 2022 08:32:29 -0800 Subject: [PATCH 063/244] make new commit Signed-off-by: Jason --- examples/tts/conf/vits.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index 20146d0902ef..10ca56722c03 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -1,7 +1,5 @@ name: "VITS" -labels: [' ', '!', "'", '(', ')', ',', '-', '.', ':', ';', '?', 'a', 'b', 'c', 'd', 'e', 'f', 'g', - 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'] train_dataset: ??? validation_datasets: ??? test_datasets: null From cd683608c57dbb877c70fab838cf1bf6d00c0763 Mon Sep 17 00:00:00 2001 From: Jason Date: Thu, 3 Feb 2022 08:59:50 -0800 Subject: [PATCH 064/244] add copyright headers Signed-off-by: Jason --- examples/tts/vits.py | 2 +- nemo/collections/tts/losses/hifigan_losses.py | 2 +- nemo/collections/tts/losses/vits_losses.py | 126 +-- nemo/collections/tts/models/vits.py | 93 ++- .../tts/modules/monotonic_align/__init__.py | 54 +- .../tts/modules/monotonic_align/core.c | 37 + .../tts/modules/monotonic_align/setup.py | 42 +- .../tts/modules/vits_mel_processing.py | 59 +- nemo/collections/tts/modules/vits_modules.py | 765 +++++++++++------- 9 files changed, 743 insertions(+), 437 deletions(-) diff --git a/examples/tts/vits.py b/examples/tts/vits.py index 24219dbd97d5..ac16c7d19a54 100644 --- a/examples/tts/vits.py +++ b/examples/tts/vits.py @@ -1,4 +1,4 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. 
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/nemo/collections/tts/losses/hifigan_losses.py b/nemo/collections/tts/losses/hifigan_losses.py index 1386606b3f84..649f075994d8 100644 --- a/nemo/collections/tts/losses/hifigan_losses.py +++ b/nemo/collections/tts/losses/hifigan_losses.py @@ -35,7 +35,7 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. -# The forward functions onf the following classes are based on code from https://github.com/jik876/hifi-gan: +# The forward functions of the following classes are based on code from https://github.com/jik876/hifi-gan: # FeatureMatchingLoss, DiscriminatorLoss, GeneratorLoss import torch diff --git a/nemo/collections/tts/losses/vits_losses.py b/nemo/collections/tts/losses/vits_losses.py index 10e1f7836aec..dc24ce249e64 100644 --- a/nemo/collections/tts/losses/vits_losses.py +++ b/nemo/collections/tts/losses/vits_losses.py @@ -1,3 +1,42 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# MIT License +# +# Copyright (c) 2021 Jaehyeon Kim +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +# The forward functions of the following classes are based on code from https://github.com/jaywalnut310/vits: +# KlLoss + import torch from nemo.core.classes import Loss, typecheck @@ -5,91 +44,6 @@ from nemo.core.neural_types.neural_type import NeuralType -class FeatureLoss(Loss): - def input_types(self): - return { - "fmap_r": [[NeuralType(elements_type=VoidType())]], - "fmap_g": [[NeuralType(elements_type=VoidType())]], - } - - @property - def output_types(self): - return { - "loss": NeuralType(elements_type=LossType()), - } - - # @typecheck() - def forward(self, fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - rl = rl.float().detach() - gl = gl.float() - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -class DiscriminatorLoss(Loss): - @property - def input_types(self): - return { - "disc_real_outputs": [NeuralType(('B', 'T'), VoidType())], - "disc_generated_outputs": [NeuralType(('B', 'T'), VoidType())], - } - - @property - def output_types(self): - return { - "loss": NeuralType(elements_type=LossType()), - "real_losses": [NeuralType(elements_type=LossType())], - "fake_losses": [NeuralType(elements_type=LossType())], - } - - def forward(self, disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - dr = dr.float() - dg = dg.float() - r_loss = torch.mean((1-dr)**2) - g_loss = torch.mean(dg**2) - loss += (r_loss + g_loss) - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -class GeneratorLoss(Loss): - """Generator Loss module""" - - @property - def input_types(self): - return { - "disc_outputs": [NeuralType(('B', 'T'), VoidType())], - } - - @property - def output_types(self): - return { - "loss": NeuralType(elements_type=LossType()), - "fake_losses": [NeuralType(elements_type=LossType())], - } - - @typecheck() - def forward(self, disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - l = torch.mean((1 - dg) ** 2) - gen_losses.append(l) - loss += l - - return loss, gen_losses - - class KlLoss(Loss): @property def input_types(self): @@ -120,7 +74,7 @@ def forward(self, z_p, logs_q, m_p, logs_p, z_mask): z_mask = z_mask.float() kl = logs_p - logs_q - 0.5 - kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. * logs_p) + kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p) kl = torch.sum(kl * z_mask) l = kl / torch.sum(z_mask) return l diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 684d41d53a37..1c19ff3411f8 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -1,3 +1,17 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
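For intuition, the KlLoss kept in vits_losses.py above is a single-sample estimate of KL(q||p) between diagonal Gaussians: `logs_p - logs_q - 0.5` is the exact entropy/normalizer difference, and the quadratic term is the Gaussian log-density of the prior evaluated at the (flow-transformed) posterior sample. A minimal sketch with toy tensors, leaving the flow out (shapes and names here are illustrative only):

    import torch

    # Toy posterior q = N(m_q, exp(logs_q)^2) and prior p = N(m_p, exp(logs_p)^2),
    # shaped [batch, channels, frames] as in the module.
    m_q, logs_q = torch.zeros(1, 2, 3), torch.zeros(1, 2, 3)
    m_p, logs_p = torch.ones(1, 2, 3), 0.5 * torch.ones(1, 2, 3)
    z_mask = torch.ones(1, 1, 3)

    z_p = m_q + torch.randn_like(m_q) * torch.exp(logs_q)  # one sample from q
    kl = logs_p - logs_q - 0.5
    kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)
    loss = torch.sum(kl * z_mask) / torch.sum(z_mask)

    # Averaged over many z_p draws this approaches the analytic
    # KL = logs_p - logs_q - 0.5 + (exp(2 * logs_q) + (m_q - m_p) ** 2) / (2 * exp(2 * logs_p)).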
+ import omegaconf import torch from dataclasses import dataclass @@ -9,9 +23,16 @@ from typing import Any, Dict from nemo.collections.tts.helpers.helpers import plot_spectrogram_to_numpy -from nemo.collections.tts.losses.vits_losses import DiscriminatorLoss, FeatureLoss, GeneratorLoss, KlLoss +from nemo.collections.tts.losses.hifigan_losses import FeatureMatchingLoss, DiscriminatorLoss, GeneratorLoss +from nemo.collections.tts.losses.vits_losses import KlLoss from nemo.collections.tts.models.base import TextToWaveform -from nemo.collections.tts.modules.vits_modules import SynthesizerTrn, MultiPeriodDiscriminator, spec_to_mel_torch, slice_segments, clip_grad_value_ +from nemo.collections.tts.modules.vits_modules import ( + SynthesizerTrn, + MultiPeriodDiscriminator, + spec_to_mel_torch, + slice_segments, + clip_grad_value_, +) from nemo.core.classes.common import PretrainedModelInfo from nemo.utils import logging @@ -31,7 +52,7 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): if isinstance(cfg, dict): cfg = OmegaConf.create(cfg) - + super().__init__(cfg=cfg, trainer=trainer) schema = OmegaConf.structured(VitsConfig) @@ -51,7 +72,7 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): self.generator = instantiate(cfg.generator) self.multiperioddisc = MultiPeriodDiscriminator() - self.feat_matching_loss = FeatureLoss() + self.feat_matching_loss = FeatureMatchingLoss() self.disc_loss = DiscriminatorLoss() self.gen_loss = GeneratorLoss() self.kl_loss = KlLoss() @@ -82,23 +103,23 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): self.win_length = cfg.train_ds.dataset.win_length self.net_g = SynthesizerTrn( - n_vocab = cfg.symbols_embedding_dim, - spec_channels = cfg.train_ds.dataset.n_fft // 2 + 1, - segment_size = cfg.segment_size // cfg.train_ds.dataset.hop_length, - inter_channels = cfg.inter_channels, - hidden_channels = cfg.hidden_channels, - filter_channels = cfg.filter_channels, - n_heads = cfg.n_heads, - n_layers = cfg.n_layers, - kernel_size = cfg.pitch_embedding_kernel_size, - p_dropout = cfg.p_dropout, - resblock = cfg.generator.resblock, - resblock_kernel_sizes = cfg.generator.resblock_kernel_sizes, - resblock_dilation_sizes = cfg.generator.resblock_dilation_sizes, - upsample_rates = cfg.generator.upsample_rates, - upsample_initial_channel = cfg.generator.upsample_initial_channel, - upsample_kernel_sizes = cfg.generator.upsample_kernel_sizes, - ) + n_vocab=cfg.symbols_embedding_dim, + spec_channels=cfg.train_ds.dataset.n_fft // 2 + 1, + segment_size=cfg.segment_size // cfg.train_ds.dataset.hop_length, + inter_channels=cfg.inter_channels, + hidden_channels=cfg.hidden_channels, + filter_channels=cfg.filter_channels, + n_heads=cfg.n_heads, + n_layers=cfg.n_layers, + kernel_size=cfg.pitch_embedding_kernel_size, + p_dropout=cfg.p_dropout, + resblock=cfg.generator.resblock, + resblock_kernel_sizes=cfg.generator.resblock_kernel_sizes, + resblock_dilation_sizes=cfg.generator.resblock_dilation_sizes, + upsample_rates=cfg.generator.upsample_rates, + upsample_initial_channel=cfg.generator.upsample_initial_channel, + upsample_kernel_sizes=cfg.generator.upsample_kernel_sizes, + ) self.net_d = MultiPeriodDiscriminator(cfg.use_spectral_norm) self.automatic_optimization = False @@ -124,15 +145,11 @@ def parse(self, str_input: str) -> torch.tensor: def configure_optimizers(self): self.optim_g = torch.optim.AdamW( - self.net_g.parameters(), - self._cfg.lr, - betas=self._cfg.betas, - eps=self._cfg.eps) + self.net_g.parameters(), self._cfg.lr, 
betas=self._cfg.betas, eps=self._cfg.eps + ) self.optim_d = torch.optim.AdamW( - self.net_d.parameters(), - self._cfg.lr, - betas=self._cfg.betas, - eps=self._cfg.eps) + self.net_d.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps + ) scheduler_g = torch.optim.lr_scheduler.ExponentialLR(self.optim_g, gamma=self._cfg.lr_decay) scheduler_g_dict = { @@ -140,10 +157,7 @@ def configure_optimizers(self): 'interval': 'step', } scheduler_d = torch.optim.lr_scheduler.ExponentialLR(self.optim_d, gamma=self._cfg.lr_decay) - scheduler_d_dict = { - 'scheduler': scheduler_d, - 'interval': 'step' - } + scheduler_d_dict = {'scheduler': scheduler_d, 'interval': 'step'} return [self.optim_g, self.optim_d], [scheduler_g_dict, scheduler_d_dict] def forward(self, batch, batch_idx): @@ -159,7 +173,6 @@ def forward(self, batch, batch_idx): return y_hat, y_hat_lengths - def get_spec(self, audio): with torch.cuda.amp.autocast(enabled=False): spec = self.stft(audio) @@ -175,15 +188,16 @@ def training_step(self, batch, batch_idx): spec_lengths = self.audio_to_melspec_precessor.get_seq_len(y_lengths) with autocast(enabled=False): - y_hat, l_length, attn, ids_slice, x_mask, z_mask, \ - (z, z_p, m_p, logs_p, m_q, logs_q) = self.net_g(x, x_lengths, spec, spec_lengths) + y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = self.net_g( + x, x_lengths, spec, spec_lengths + ) mel = spec_to_mel_torch( spec, self._cfg.filter_length, self._cfg.n_mel_channels, self._cfg.sample_rate, self._cfg.mel_fmin, - self._cfg.mel_fmax + self._cfg.mel_fmax, ) y_mel = slice_segments(mel, ids_slice, self._cfg.segment_size // self._cfg.hop_size) @@ -195,7 +209,7 @@ def training_step(self, batch, batch_idx): self._cfg.hop_size, self._cfg.preprocessor.n_window_size, self._cfg.mel_fmin, - self._cfg.mel_fmax + self._cfg.mel_fmax, ) y = torch.unsqueeze(y, 1) y = slice_segments(y, ids_slice * self._cfg.hop_size, self._cfg.segment_size) # slice @@ -216,7 +230,7 @@ def training_step(self, batch, batch_idx): loss_dur = torch.sum(l_length.float()) loss_mel = F.l1_loss(y_mel, y_hat_mel) * self._cfg.c_mel loss_kl = self.kl_loss(z_p=z_p, logs_q=logs_q, m_p=m_p, logs_p=logs_p, z_mask=z_mask) * self._cfg.c_kl - loss_fm = self.feat_matching_loss(fmap_r=fmap_r, fmap_g=fmap_g) + loss_fm = self.feat_matching_loss(fmap_r=fmap_r.detach(), fmap_g=fmap_g) loss_gen, losses_gen = self.gen_loss(disc_outputs=y_d_hat_g) loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl @@ -306,7 +320,6 @@ def _loader(cfg): return torch.utils.data.DataLoader( # noqa dataset=dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params, ) - def setup_training_data(self, cfg): self._train_dl = self._loader(cfg) diff --git a/nemo/collections/tts/modules/monotonic_align/__init__.py b/nemo/collections/tts/modules/monotonic_align/__init__.py index 9293c5af5d4a..ff0138663a82 100644 --- a/nemo/collections/tts/modules/monotonic_align/__init__.py +++ b/nemo/collections/tts/modules/monotonic_align/__init__.py @@ -1,19 +1,55 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# MIT License +# +# Copyright (c) 2021 Jaehyeon Kim +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + import numpy as np import torch from .core import maximum_path_c def maximum_path(neg_cent, mask): - """ Cython optimized version. + """ Cython optimized version. neg_cent: [b, t_t, t_s] mask: [b, t_t, t_s] """ - device = neg_cent.device - dtype = neg_cent.dtype - neg_cent = neg_cent.data.cpu().numpy().astype(np.float32) - path = np.zeros(neg_cent.shape, dtype=np.int32) + device = neg_cent.device + dtype = neg_cent.dtype + neg_cent = neg_cent.data.cpu().numpy().astype(np.float32) + path = np.zeros(neg_cent.shape, dtype=np.int32) - t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32) - t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32) - maximum_path_c(path, neg_cent, t_t_max, t_s_max) - return torch.from_numpy(path).to(device=device, dtype=dtype) + t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32) + t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32) + maximum_path_c(path, neg_cent, t_t_max, t_s_max) + return torch.from_numpy(path).to(device=device, dtype=dtype) diff --git a/nemo/collections/tts/modules/monotonic_align/core.c b/nemo/collections/tts/modules/monotonic_align/core.c index 5631d20a9a00..3d483c44a473 100644 --- a/nemo/collections/tts/modules/monotonic_align/core.c +++ b/nemo/collections/tts/modules/monotonic_align/core.c @@ -1,3 +1,40 @@ +// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
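The `maximum_path` wrapper above hands one score matrix per utterance to the compiled `maximum_path_c`; a plain-NumPy sketch of the dynamic program it runs (assuming at least as many output frames as input tokens, as in training) may help when reading the generated C below:

    import numpy as np

    def maximum_path_numpy(neg_cent):
        """Sketch of the per-utterance DP behind maximum_path_c.

        neg_cent: [t_y, t_x] score for aligning output step y to input token x.
        Returns a 0/1 path that is monotonic and non-skipping and maximizes
        the total score, assuming t_y >= t_x.
        """
        t_y, t_x = neg_cent.shape
        value = neg_cent.copy()
        for y in range(t_y):
            for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
                v_cur = value[y - 1, x] if x != y else -np.inf   # stay on token x
                v_prev = (0.0 if y == 0 else -np.inf) if x == 0 else value[y - 1, x - 1]
                value[y, x] += max(v_prev, v_cur)                # best score ending at (y, x)
        path = np.zeros((t_y, t_x), dtype=np.int32)
        index = t_x - 1
        for y in range(t_y - 1, -1, -1):                         # backtrack from the last token
            path[y, index] = 1
            if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]):
                index -= 1
        return path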
+ +// MIT License +// +// Copyright (c) 2021 Jaehyeon Kim +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + + /* Generated by Cython 0.29.21 */ /* BEGIN: Cython Metadata diff --git a/nemo/collections/tts/modules/monotonic_align/setup.py b/nemo/collections/tts/modules/monotonic_align/setup.py index 30c224807a70..245556dcf7d6 100644 --- a/nemo/collections/tts/modules/monotonic_align/setup.py +++ b/nemo/collections/tts/modules/monotonic_align/setup.py @@ -1,9 +1,41 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# MIT License +# +# Copyright (c) 2021 Jaehyeon Kim +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ from distutils.core import setup from Cython.Build import cythonize import numpy -setup( - name = 'monotonic_align', - ext_modules = cythonize("core.pyx"), - include_dirs=[numpy.get_include()] -) +setup(name='monotonic_align', ext_modules=cythonize("core.pyx"), include_dirs=[numpy.get_include()]) diff --git a/nemo/collections/tts/modules/vits_mel_processing.py b/nemo/collections/tts/modules/vits_mel_processing.py index 0d8bc0c15522..0858228b8611 100644 --- a/nemo/collections/tts/modules/vits_mel_processing.py +++ b/nemo/collections/tts/modules/vits_mel_processing.py @@ -1,3 +1,39 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# MIT License +# +# Copyright (c) 2021 Jaehyeon Kim +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
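The mel-processing module below is built around `spectrogram_torch`, a reflect-padded magnitude STFT with a cached Hann window. A minimal sketch of the same computation with toy sizes (parameters here are illustrative only; recent PyTorch versions additionally require `return_complex=True`, with `.abs()` replacing the old `sqrt(real^2 + imag^2)` reduction):

    import torch

    n_fft, hop, win = 8, 2, 8        # toy sizes; VITS configs use values like 1024/256/1024
    y = torch.randn(1, 32)           # [batch, samples], nominally in [-1, 1]

    pad = (n_fft - hop) // 2         # frame the signal the way the module does
    y = torch.nn.functional.pad(y.unsqueeze(1), (pad, pad), mode='reflect').squeeze(1)
    spec = torch.stft(
        y, n_fft, hop_length=hop, win_length=win,
        window=torch.hann_window(win), center=False,
        pad_mode='reflect', normalized=False, onesided=True,
        return_complex=True,
    )
    mag = spec.abs()                 # [batch, n_fft // 2 + 1, frames]
    # The module adds 1e-6 under the sqrt for numerical stability before the mel projection.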
+ import torch import torch.utils.data from librosa.filters import mel as librosa_mel_fn @@ -38,9 +74,9 @@ def spectral_de_normalize_torch(magnitudes): def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - if torch.min(y) < -1.: + if torch.min(y) < -1.0: print('min value is ', torch.min(y)) - if torch.max(y) > 1.: + if torch.max(y) > 1.0: print('max value is ', torch.max(y)) global hann_window @@ -49,11 +85,22 @@ def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False) if wnsize_dtype_device not in hann_window: hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') + y = torch.nn.functional.pad( + y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode='reflect' + ) y = y.squeeze(1) - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True) + spec = torch.stft( + y, + n_fft, + hop_length=hop_size, + win_length=win_size, + window=hann_window[wnsize_dtype_device], + center=center, + pad_mode='reflect', + normalized=False, + onesided=True, + ) spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) return spec @@ -74,4 +121,4 @@ def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): def audio_to_mel_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): spec = spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center) melspec = spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax) - return melspec \ No newline at end of file + return melspec diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index 9545024fc195..f6717d6fd1f8 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -1,3 +1,39 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# MIT License +# +# Copyright (c) 2021 Jaehyeon Kim +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + import numpy as np import math @@ -45,9 +81,7 @@ def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_la self.norm_layers = nn.ModuleList() self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2)) self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) + self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) for _ in range(n_layers - 1): self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2)) self.norm_layers.append(LayerNorm(hidden_channels)) @@ -70,7 +104,7 @@ class DDSConv(nn.Module): Dialted and Depth-Separable Convolution """ - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): + def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): super().__init__() self.channels = channels self.kernel_size = kernel_size @@ -85,9 +119,9 @@ def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): for i in range(n_layers): dilation = kernel_size ** i padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, - groups=channels, dilation=dilation, padding=padding - )) + self.convs_sep.append( + nn.Conv1d(channels, channels, kernel_size, groups=channels, dilation=dilation, padding=padding) + ) self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) self.norms_1.append(LayerNorm(channels)) self.norms_2.append(LayerNorm(channels)) @@ -110,9 +144,9 @@ def forward(self, x, x_mask, g=None): class WN(torch.nn.Module): def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): super(WN, self).__init__() - assert (kernel_size % 2 == 1) + assert kernel_size % 2 == 1 self.hidden_channels = hidden_channels - self.kernel_size = kernel_size, + self.kernel_size = (kernel_size,) self.dilation_rate = dilation_rate self.n_layers = n_layers self.gin_channels = gin_channels @@ -129,8 +163,9 @@ def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_ch for i in range(n_layers): dilation = dilation_rate ** i padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, kernel_size, - dilation=dilation, padding=padding) + in_layer = torch.nn.Conv1d( + hidden_channels, 2 * hidden_channels, kernel_size, dilation=dilation, padding=padding + ) in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') self.in_layers.append(in_layer) @@ -155,21 +190,18 @@ def forward(self, x, x_mask, g=None, **kwargs): x_in = self.in_layers[i](x) if g is not None: cond_offset = i * 2 * self.hidden_channels - g_l = g[:, cond_offset:cond_offset + 2 * self.hidden_channels, :] + g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] else: g_l = torch.zeros_like(x_in) - acts = fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) + acts = fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) acts = self.drop(acts) res_skip_acts = self.res_skip_layers[i](acts) if i < self.n_layers - 1: - res_acts = res_skip_acts[:, :self.hidden_channels, :] + res_acts = res_skip_acts[:, : self.hidden_channels, :] x = (x + res_acts) * x_mask - 
output = output + res_skip_acts[:, self.hidden_channels:, :] + output = output + res_skip_acts[:, self.hidden_channels :, :] else: output = output + res_skip_acts return output * x_mask @@ -186,24 +218,55 @@ def remove_weight_norm(self): class ResBlock1(torch.nn.Module): def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) + self.convs1 = nn.ModuleList( + [ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]), + ) + ), + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]), + ) + ), + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[2], + padding=get_padding(kernel_size, dilation[2]), + ) + ), + ] + ) self.convs1.apply(init_weights) - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) + self.convs2 = nn.ModuleList( + [ + weight_norm( + Conv1d(channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1)) + ), + weight_norm( + Conv1d(channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1)) + ), + weight_norm( + Conv1d(channels, channels, kernel_size, 1, dilation=1, padding=get_padding(kernel_size, 1)) + ), + ] + ) self.convs2.apply(init_weights) def forward(self, x, x_mask=None): @@ -231,12 +294,30 @@ def remove_weight_norm(self): class ResBlock2(torch.nn.Module): def __init__(self, channels, kernel_size=3, dilation=(1, 3)): super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) + self.convs = nn.ModuleList( + [ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]), + ) + ), + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]), + ) + ), + ] + ) self.convs.apply(init_weights) def forward(self, x, x_mask=None): @@ -295,15 +376,17 @@ def forward(self, x, x_mask, reverse=False, **kwargs): class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): + def __init__( + self, + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + p_dropout=0, + gin_channels=0, + mean_only=False, + ): assert channels % 2 == 0, "channels should be divisible by 2" super().__init__() self.channels = 
channels @@ -315,8 +398,9 @@ def __init__(self, self.mean_only = mean_only self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, - gin_channels=gin_channels) + self.enc = WN( + hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels + ) self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) self.post.weight.data.zero_() self.post.bias.data.zero_() @@ -355,7 +439,7 @@ def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins self.half_channels = in_channels // 2 self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) + self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0) self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) self.proj.weight.data.zero_() self.proj.bias.data.zero_() @@ -369,18 +453,19 @@ def forward(self, x, x_mask, g=None, reverse=False): b, c, t = x0.shape h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] - unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins:2 * self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_derivatives = h[..., 2 * self.num_bins:] + unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels) + unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(self.filter_channels) + unnormalized_derivatives = h[..., 2 * self.num_bins :] - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) + x1, logabsdet = piecewise_rational_quadratic_transform( + x1, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=reverse, + tails='linear', + tail_bound=self.tail_bound, + ) x = torch.cat([x0, x1], 1) * x_mask logdet = torch.sum(logabsdet * x_mask, [1, 2]) @@ -393,7 +478,7 @@ def forward(self, x, x_mask, g=None, reverse=False): class StochasticDurationPredictor(nn.Module): def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): super().__init__() - filter_channels = in_channels # it needs to be removed from future version. + filter_channels = in_channels # it needs to be removed from future version. 
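# The StochasticDurationPredictor defined here models log-durations with a
# normalizing flow built from the ConvFlow/Flip layers reformatted above:
# during training forward() returns a variational negative log-likelihood
# bound on the durations, and with reverse=True it samples them at inference.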
self.in_channels = in_channels self.filter_channels = filter_channels self.kernel_size = kernel_size @@ -448,8 +533,8 @@ def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): z_u, z1 = torch.split(z_q, [1, 1], 1) u = torch.sigmoid(z_u) * x_mask z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2]) - logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q + logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2]) + logq = torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q ** 2)) * x_mask, [1, 2]) - logdet_tot_q logdet_tot = 0 z0, logdet = self.log_flow(z0, x_mask) @@ -458,11 +543,11 @@ def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): for flow in flows: z, logdet = flow(z, x_mask, g=x, reverse=reverse) logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot - return nll + logq # [b] + nll = torch.sum(0.5 * (math.log(2 * math.pi) + (z ** 2)) * x_mask, [1, 2]) - logdet_tot + return nll + logq # [b] else: flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow + flows = flows[:-2] + [flows[-1]] # remove a useless vflow z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale for flow in flows: z = flow(z, x_mask, g=x, reverse=reverse) @@ -482,9 +567,9 @@ def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_cha self.gin_channels = gin_channels self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) + self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2) self.norm_1 = LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) + self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2) self.norm_2 = LayerNorm(filter_channels) self.proj = nn.Conv1d(filter_channels, 1, 1) @@ -509,15 +594,9 @@ def forward(self, x, x_mask, g=None): class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout): + def __init__( + self, n_vocab, out_channels, hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout + ): super().__init__() self.n_vocab = n_vocab self.out_channels = out_channels @@ -529,20 +608,14 @@ def __init__(self, self.p_dropout = p_dropout self.emb = nn.Embedding(n_vocab, hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) + nn.init.normal_(self.emb.weight, 0.0, hidden_channels ** -0.5) - self.encoder = Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1) + self.encoder = Encoder(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) def forward(self, x, x_lengths): - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - x = torch.transpose(x, 1, -1) # [b, h, t] + x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] + x = torch.transpose(x, 1, -1) # [b, h, t] x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) x = self.encoder(x * x_mask, x_mask) @@ -553,14 +626,7 @@ def forward(self, x, 
x_lengths): class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): + def __init__(self, channels, hidden_channels, kernel_size, dilation_rate, n_layers, n_flows=4, gin_channels=0): super().__init__() self.channels = channels self.hidden_channels = hidden_channels @@ -572,7 +638,17 @@ def __init__(self, self.flows = nn.ModuleList() for i in range(n_flows): - self.flows.append(ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) + self.flows.append( + ResidualCouplingLayer( + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=gin_channels, + mean_only=True, + ) + ) self.flows.append(Flip()) def forward(self, x, x_mask, g=None, reverse=False): @@ -586,14 +662,9 @@ def forward(self, x, x_mask, g=None, reverse=False): class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): + def __init__( + self, in_channels, out_channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0 + ): super().__init__() self.in_channels = in_channels self.out_channels = out_channels @@ -618,7 +689,17 @@ def forward(self, x, x_lengths, g=None): class Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): + def __init__( + self, + initial_channel, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=0, + ): super(Generator, self).__init__() self.num_kernels = len(resblock_kernel_sizes) self.num_upsamples = len(upsample_rates) @@ -627,13 +708,21 @@ def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_di self.ups = nn.ModuleList() for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), - k, u, padding=(k-u)//2))) + self.ups.append( + weight_norm( + ConvTranspose1d( + upsample_initial_channel // (2 ** i), + upsample_initial_channel // (2 ** (i + 1)), + k, + u, + padding=(k - u) // 2, + ) + ) + ) self.resblocks = nn.ModuleList() for i in range(len(self.ups)): - ch = upsample_initial_channel//(2**(i+1)) + ch = upsample_initial_channel // (2 ** (i + 1)) for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): self.resblocks.append(resblock(ch, k, d)) @@ -654,9 +743,9 @@ def forward(self, x, g=None): xs = None for j in range(self.num_kernels): if xs is None: - xs = self.resblocks[i*self.num_kernels+j](x) + xs = self.resblocks[i * self.num_kernels + j](x) else: - xs += self.resblocks[i*self.num_kernels+j](x) + xs += self.resblocks[i * self.num_kernels + j](x) x = xs / self.num_kernels x = F.leaky_relu(x) x = self.conv_post(x) @@ -678,13 +767,15 @@ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): self.period = period self.use_spectral_norm = use_spectral_norm norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), 
padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) + self.convs = nn.ModuleList( + [ + norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), + ] + ) self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) def forward(self, x): @@ -692,7 +783,7 @@ def forward(self, x): # 1d to 2d b, c, t = x.shape - if t % self.period != 0: # pad first + if t % self.period != 0: # pad first n_pad = self.period - (t % self.period) x = F.pad(x, (0, n_pad), "reflect") t = t + n_pad @@ -713,14 +804,16 @@ class DiscriminatorS(torch.nn.Module): def __init__(self, use_spectral_norm=False): super(DiscriminatorS, self).__init__() norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) + self.convs = nn.ModuleList( + [ + norm_f(Conv1d(1, 16, 15, 1, padding=7)), + norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), + norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), + norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), + norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), + norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), + ] + ) self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) def forward(self, x): @@ -740,7 +833,7 @@ def forward(self, x): class MultiPeriodDiscriminator(torch.nn.Module): def __init__(self, use_spectral_norm=False): super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] + periods = [2, 3, 5, 7, 11] discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] @@ -767,27 +860,29 @@ class SynthesizerTrn(nn.Module): Synthesizer for Training """ - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - **kwargs): + def __init__( + self, + n_vocab, + spec_channels, + segment_size, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + n_speakers=0, + gin_channels=0, + use_sdp=True, + **kwargs + ): super().__init__() self.n_vocab = n_vocab @@ -811,16 +906,22 @@ def __init__(self, self.use_sdp = use_sdp 
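# `use_sdp` chooses the duration model attached below: when True, the
# flow-based StochasticDurationPredictor (trained with the NLL-style l_length
# term); when False, the deterministic DurationPredictor (trained with an MSE
# on log-durations), as the two branches of forward() further down show.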
- self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) + self.enc_p = TextEncoder( + n_vocab, inter_channels, hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout + ) + self.dec = Generator( + inter_channels, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=gin_channels, + ) + self.enc_q = PosteriorEncoder( + spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels + ) self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) if use_sdp: @@ -835,7 +936,7 @@ def forward(self, x, x_lengths, y, y_lengths, sid=None): x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] + g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] else: g = None @@ -844,11 +945,15 @@ def forward(self, x, x_lengths, y, y_lengths, sid=None): with torch.no_grad(): # negative cross-entropy - s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] - neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] - neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] + s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] + neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] + neg_cent2 = torch.matmul( + -0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r + ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] + neg_cent3 = torch.matmul( + z_p.transpose(1, 2), (m_p * s_p_sq_r) + ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] + neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) @@ -861,7 +966,7 @@ def forward(self, x, x_lengths, y, y_lengths, sid=None): else: logw_ = torch.log(w + 1e-6) * x_mask logw = self.dp(x, x_mask, g=g) - l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging + l_length = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(x_mask) # for averaging # expand prior m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) @@ -871,10 +976,10 @@ def forward(self, x, x_lengths, y, y_lengths, sid=None): o = self.dec(z_slice, g=g) return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): + def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1.0, max_len=None): x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] + g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] else: g = None @@ -889,12 +994,14 @@ def infer(self, x, x_lengths, sid=None, noise_scale=1, 
length_scale=1, noise_sca attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) attn = generate_path(w_ceil, attn_mask) - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] + m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] + logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose( + 1, 2 + ) # [b, t', t], [b, t, d] -> [b, d, t'] z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:,:,:max_len], g=g) + o = self.dec((z * y_mask)[:, :, :max_len], g=g) return o, attn, y_mask, (z, z_p, m_p, logs_p) def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): @@ -907,6 +1014,7 @@ def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): o_hat = self.dec(z_hat * y_mask, g=g_tgt) return o_hat, y_mask, (z, z_p, z_hat) + ################## # Mel_processing # ################## @@ -914,6 +1022,7 @@ def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): mel_basis = {} hann_window = {} + def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): global mel_basis dtype_device = str(spec.dtype) + '_' + str(spec.device) @@ -927,9 +1036,9 @@ def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): def audio_to_mel_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: + if torch.min(y) < -1.0: print('min value is ', torch.min(y)) - if torch.max(y) > 1.: + if torch.max(y) > 1.0: print('max value is ', torch.max(y)) global mel_basis, hann_window @@ -942,11 +1051,22 @@ def audio_to_mel_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fm if wnsize_dtype_device not in hann_window: hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') + y = torch.nn.functional.pad( + y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode='reflect' + ) y = y.squeeze(1) - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True) + spec = torch.stft( + y, + n_fft, + hop_length=hop_size, + win_length=win_size, + window=hann_window[wnsize_dtype_device], + center=center, + pad_mode='reflect', + normalized=False, + onesided=True, + ) spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) @@ -960,6 +1080,7 @@ def audio_to_mel_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fm # Commons # ########### + def init_weights(m, mean=0.0, std=0.01): classname = m.__class__.__name__ if classname.find("Conv") != -1: @@ -967,7 +1088,7 @@ def init_weights(m, mean=0.0, std=0.01): def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) + return int((kernel_size * dilation - dilation) / 2) def convert_pad_shape(pad_shape): @@ -985,7 +1106,7 @@ def intersperse(lst, item): def kl_divergence(m_p, logs_p, m_q, logs_q): """KL(P||Q)""" kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. 
* logs_q) + kl += 0.5 * (torch.exp(2.0 * logs_p) + ((m_p - m_q) ** 2)) * torch.exp(-2.0 * logs_q) return kl @@ -1020,15 +1141,13 @@ def rand_slice_segments(x, x_lengths=None, segment_size=4): return ret, ids_str -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): +def get_timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4): position = torch.arange(length, dtype=torch.float) num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) + log_timescale_increment = math.log(float(max_timescale) / float(min_timescale)) / (num_timescales - 1) inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) + torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment + ) scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) signal = F.pad(signal, [0, 0, 0, channels % 2]) @@ -1088,12 +1207,12 @@ def generate_path(duration, mask): """ b, _, t_y, t_x = mask.shape cum_duration = torch.cumsum(duration, -1) - + cum_duration_flat = cum_duration.view(b * t_x) path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) path = path.view(b, t_x, t_y) path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask + path = path.unsqueeze(1).transpose(2, 3) * mask return path @@ -1111,7 +1230,7 @@ def clip_grad_value_(parameters, clip_value, norm_type=2): total_norm += param_norm.item() ** norm_type if clip_value is not None: p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1. / norm_type) + total_norm = total_norm ** (1.0 / norm_type) return total_norm @@ -1119,7 +1238,17 @@ def clip_grad_value_(parameters, clip_value, norm_type=2): # Attentions # ############## class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): + def __init__( + self, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size=1, + p_dropout=0.0, + window_size=4, + **kwargs + ): super().__init__() self.hidden_channels = hidden_channels self.filter_channels = filter_channels @@ -1135,9 +1264,15 @@ def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_s self.ffn_layers = nn.ModuleList() self.norm_layers_2 = nn.ModuleList() for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) + self.attn_layers.append( + MultiHeadAttention( + hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size + ) + ) self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) + self.ffn_layers.append( + FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout) + ) self.norm_layers_2.append(LayerNorm(hidden_channels)) def forward(self, x, x_mask): @@ -1156,7 +1291,18 @@ def forward(self, x, x_mask): class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): + def __init__( + self, + hidden_channels, + filter_channels, + n_heads, + n_layers, + 
kernel_size=1, + p_dropout=0.0, + proximal_bias=False, + proximal_init=True, + **kwargs + ): super().__init__() self.hidden_channels = hidden_channels self.filter_channels = filter_channels @@ -1175,11 +1321,24 @@ def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_s self.ffn_layers = nn.ModuleList() self.norm_layers_2 = nn.ModuleList() for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) + self.self_attn_layers.append( + MultiHeadAttention( + hidden_channels, + hidden_channels, + n_heads, + p_dropout=p_dropout, + proximal_bias=proximal_bias, + proximal_init=proximal_init, + ) + ) self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) + self.encdec_attn_layers.append( + MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout) + ) self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) + self.ffn_layers.append( + FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True) + ) self.norm_layers_2.append(LayerNorm(hidden_channels)) def forward(self, x, x_mask, h, h_mask): @@ -1187,7 +1346,12 @@ def forward(self, x, x_mask, h, h_mask): x: decoder input h: encoder output """ - self_attn_mask = torch.tril(torch.ones(x_mask.size(2), x_mask.size(2))).unsqueeze(0).unsqueeze(0).to(device=x.device, dtype=x.dtype) + self_attn_mask = ( + torch.tril(torch.ones(x_mask.size(2), x_mask.size(2))) + .unsqueeze(0) + .unsqueeze(0) + .to(device=x.device, dtype=x.dtype) + ) encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) x = x * x_mask for i in range(self.n_layers): @@ -1198,7 +1362,7 @@ def forward(self, x, x_mask, h, h_mask): y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) y = self.drop(y) x = self.norm_layers_1[i](x + y) - + y = self.ffn_layers[i](x, x_mask) y = self.drop(y) x = self.norm_layers_2[i](x + y) @@ -1207,7 +1371,18 @@ def forward(self, x, x_mask, h, h_mask): class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): + def __init__( + self, + channels, + out_channels, + n_heads, + p_dropout=0.0, + window_size=None, + heads_share=True, + block_length=None, + proximal_bias=False, + proximal_init=False, + ): super().__init__() assert channels % n_heads == 0 @@ -1231,7 +1406,7 @@ def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=No if window_size is not None: n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 + rel_stddev = self.k_channels ** -0.5 self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) @@ -1242,12 +1417,12 @@ def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=No with torch.no_grad(): self.conv_k.weight.copy_(self.conv_q.weight) self.conv_k.bias.copy_(self.conv_q.bias) - + def forward(self, x, c, attn_mask=None): q = self.conv_q(x) k = self.conv_k(c) v = self.conv_v(c) - + x, self.attn = self.attention(q, k, 
v, mask=attn_mask) x = self.conv_o(x) @@ -1264,7 +1439,7 @@ def attention(self, query, key, value, mask=None): if self.window_size is not None: assert t_s == t_t, "Relative attention is only available for self-attention." key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) + rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings) scores_local = self._relative_position_to_absolute_position(rel_logits) scores = scores + scores_local if self.proximal_bias: @@ -1276,14 +1451,14 @@ def attention(self, query, key, value, mask=None): assert t_s == t_t, "Local attention is only available for self-attention." block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] + p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] p_attn = self.drop(p_attn) output = torch.matmul(p_attn, value) if self.window_size is not None: relative_weights = self._absolute_position_to_relative_position(p_attn) value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] + output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] return output, p_attn def _matmul_with_relative_values(self, x, y): @@ -1311,11 +1486,11 @@ def _get_relative_embeddings(self, relative_embeddings, length): slice_end_position = slice_start_position + 2 * length - 1 if pad_length > 0: padded_relative_embeddings = F.pad( - relative_embeddings, - convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) + relative_embeddings, convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]) + ) else: padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] + used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position] return used_relative_embeddings def _relative_position_to_absolute_position(self, x): @@ -1325,14 +1500,14 @@ def _relative_position_to_absolute_position(self, x): """ batch, heads, length, _ = x.size() # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) + x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) # Concat extra elements so to add up to shape (len+1, 2*len-1). x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, convert_pad_shape([[0,0],[0,0],[0,length-1]])) + x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])) # Reshape and slice out the padded elements. 
- x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] + x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1 :] return x_final def _absolute_position_to_relative_position(self, x): @@ -1342,11 +1517,11 @@ def _absolute_position_to_relative_position(self, x): """ batch, heads, length, _ = x.size() # padd along column - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) + x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])) + x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)]) # add 0's in the beginning that will skew the elements after reshape x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] + x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] return x_final def _attention_bias_proximal(self, length): @@ -1362,7 +1537,9 @@ def _attention_bias_proximal(self, length): class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): + def __init__( + self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0.0, activation=None, causal=False + ): super().__init__() self.in_channels = in_channels self.out_channels = out_channels @@ -1390,7 +1567,7 @@ def forward(self, x, x_mask): x = self.drop(x) x = self.conv_2(self.padding(x * x_mask)) return x * x_mask - + def _causal_padding(self, x): if self.kernel_size == 1: return x @@ -1419,59 +1596,57 @@ def _same_padding(self, x): DEFAULT_MIN_DERIVATIVE = 1e-3 -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): +def piecewise_rational_quadratic_transform( + inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + tails=None, + tail_bound=1.0, + min_bin_width=DEFAULT_MIN_BIN_WIDTH, + min_bin_height=DEFAULT_MIN_BIN_HEIGHT, + min_derivative=DEFAULT_MIN_DERIVATIVE, +): if tails is None: spline_fn = rational_quadratic_spline spline_kwargs = {} else: spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } + spline_kwargs = {'tails': tails, 'tail_bound': tail_bound} outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs + inputs=inputs, + unnormalized_widths=unnormalized_widths, + unnormalized_heights=unnormalized_heights, + unnormalized_derivatives=unnormalized_derivatives, + inverse=inverse, + min_bin_width=min_bin_width, + min_bin_height=min_bin_height, + min_derivative=min_derivative, + **spline_kwargs ) return outputs, logabsdet def searchsorted(bin_locations, inputs, eps=1e-6): bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - 
tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): + return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1 + + +def unconstrained_rational_quadratic_spline( + inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + tails='linear', + tail_bound=1.0, + min_bin_width=DEFAULT_MIN_BIN_WIDTH, + min_bin_height=DEFAULT_MIN_BIN_HEIGHT, + min_derivative=DEFAULT_MIN_DERIVATIVE, +): inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) outside_interval_mask = ~inside_interval_mask @@ -1495,23 +1670,32 @@ def unconstrained_rational_quadratic_spline(inputs, unnormalized_heights=unnormalized_heights[inside_interval_mask, :], unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, + left=-tail_bound, + right=tail_bound, + bottom=-tail_bound, + top=tail_bound, min_bin_width=min_bin_width, min_bin_height=min_bin_height, - min_derivative=min_derivative + min_derivative=min_derivative, ) return outputs, logabsdet -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): + +def rational_quadratic_spline( + inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + left=0.0, + right=1.0, + bottom=0.0, + top=1.0, + min_bin_width=DEFAULT_MIN_BIN_WIDTH, + min_bin_height=DEFAULT_MIN_BIN_HEIGHT, + min_derivative=DEFAULT_MIN_DERIVATIVE, +): if torch.min(inputs) < left or torch.max(inputs) > right: raise ValueError('Input to a transform is not within its domain') @@ -1560,15 +1744,13 @@ def rational_quadratic_spline(inputs, input_heights = heights.gather(-1, bin_idx)[..., 0] if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) + a = (inputs - input_cumheights) * ( + input_derivatives + input_derivatives_plus_one - 2 * input_delta + ) + input_heights * (input_delta - input_derivatives) + b = input_heights * input_derivatives - (inputs - input_cumheights) * ( + input_derivatives + input_derivatives_plus_one - 2 * input_delta + ) + c = -input_delta * (inputs - input_cumheights) discriminant = b.pow(2) - 4 * a * c assert (discriminant >= 0).all() @@ -1577,11 +1759,14 @@ def rational_quadratic_spline(inputs, outputs = root * input_bin_widths + input_cumwidths theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) + denominator = input_delta + ( + (input_derivatives + input_derivatives_plus_one - 2 * input_delta) * theta_one_minus_theta + ) + derivative_numerator = input_delta.pow(2) * ( + input_derivatives_plus_one * root.pow(2) + + 2 * input_delta * 
theta_one_minus_theta + + input_derivatives * (1 - root).pow(2) + ) logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) return outputs, -logabsdet @@ -1589,15 +1774,17 @@ def rational_quadratic_spline(inputs, theta = (inputs - input_cumwidths) / input_bin_widths theta_one_minus_theta = theta * (1 - theta) - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) + numerator = input_heights * (input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta) + denominator = input_delta + ( + (input_derivatives + input_derivatives_plus_one - 2 * input_delta) * theta_one_minus_theta + ) outputs = input_cumheights + numerator / denominator - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) + derivative_numerator = input_delta.pow(2) * ( + input_derivatives_plus_one * theta.pow(2) + + 2 * input_delta * theta_one_minus_theta + + input_derivatives * (1 - theta).pow(2) + ) logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) return outputs, logabsdet From 4ab846314eea66d978fb6bac33c0e7a6f64027c3 Mon Sep 17 00:00:00 2001 From: Jason Date: Thu, 3 Feb 2022 09:00:36 -0800 Subject: [PATCH 065/244] style Signed-off-by: Jason --- nemo/collections/nlp/models/nlp_model.py | 1 - nemo/collections/tts/models/vits.py | 14 +++++++------- .../tts/modules/monotonic_align/__init__.py | 1 + nemo/collections/tts/modules/vits_modules.py | 6 ++---- setup.py | 10 +++++----- 5 files changed, 15 insertions(+), 17 deletions(-) diff --git a/nemo/collections/nlp/models/nlp_model.py b/nemo/collections/nlp/models/nlp_model.py index 732c50e97807..a0e5d8205cc5 100644 --- a/nemo/collections/nlp/models/nlp_model.py +++ b/nemo/collections/nlp/models/nlp_model.py @@ -34,7 +34,6 @@ from nemo.core.classes.exportable import Exportable from nemo.utils import AppState, logging - __all__ = ['NLPModel'] NEMO_NLP_TMP = os.path.join(os.path.dirname(str(TRANSFORMERS_CACHE)), "nemo_nlp_tmp") diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 1c19ff3411f8..6b9021e10074 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -12,26 +12,27 @@ # See the License for the specific language governing permissions and # limitations under the License. 
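Looking back at the rational-quadratic spline code reformatted just above, a useful numerical check is that the transform stays bijective: applying the inverse to the forward output should recover the inputs, with the log-determinants cancelling. A hypothetical snippet, not part of any commit here; it assumes the 'linear'-tails path pads the derivative tensor internally, so num_bins - 1 interior derivatives are passed in:

import torch

from nemo.collections.tts.modules.vits_modules import piecewise_rational_quadratic_transform

num_bins = 8
inputs = torch.rand(4, 16) * 2 - 1      # strictly inside the default tail_bound of 1.0
w = torch.randn(4, 16, num_bins)        # unnormalized widths
h = torch.randn(4, 16, num_bins)        # unnormalized heights
d = torch.randn(4, 16, num_bins - 1)    # unnormalized interior derivatives

y, logdet = piecewise_rational_quadratic_transform(inputs, w, h, d, inverse=False, tails='linear')
x, logdet_inv = piecewise_rational_quadratic_transform(y, w, h, d, inverse=True, tails='linear')
assert torch.allclose(x, inputs, atol=1e-4)                                  # round trip recovers inputs
assert torch.allclose(logdet + logdet_inv, torch.zeros_like(logdet), atol=1e-4)  # log-dets cancel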
+from dataclasses import dataclass +from typing import Any, Dict + import omegaconf import torch -from dataclasses import dataclass from hydra.utils import instantiate from omegaconf import MISSING, DictConfig, OmegaConf from pytorch_lightning import Trainer from torch.cuda.amp import autocast from torch.nn import functional as F -from typing import Any, Dict from nemo.collections.tts.helpers.helpers import plot_spectrogram_to_numpy -from nemo.collections.tts.losses.hifigan_losses import FeatureMatchingLoss, DiscriminatorLoss, GeneratorLoss +from nemo.collections.tts.losses.hifigan_losses import DiscriminatorLoss, FeatureMatchingLoss, GeneratorLoss from nemo.collections.tts.losses.vits_losses import KlLoss from nemo.collections.tts.models.base import TextToWaveform from nemo.collections.tts.modules.vits_modules import ( - SynthesizerTrn, MultiPeriodDiscriminator, - spec_to_mel_torch, - slice_segments, + SynthesizerTrn, clip_grad_value_, + slice_segments, + spec_to_mel_torch, ) from nemo.core.classes.common import PretrainedModelInfo from nemo.utils import logging @@ -340,4 +341,3 @@ def list_available_models(cls) -> 'List[PretrainedModelInfo]': def convert_text_to_waveform(self, *, tokens): # TODO: Convert text to waveforms pass - diff --git a/nemo/collections/tts/modules/monotonic_align/__init__.py b/nemo/collections/tts/modules/monotonic_align/__init__.py index ff0138663a82..2e2104ef4733 100644 --- a/nemo/collections/tts/modules/monotonic_align/__init__.py +++ b/nemo/collections/tts/modules/monotonic_align/__init__.py @@ -36,6 +36,7 @@ import numpy as np import torch + from .core import maximum_path_c diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index f6717d6fd1f8..e6b0d307f036 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -34,20 +34,18 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
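The slice_segments and rand_slice_segments helpers imported in the vits.py hunk above implement the random-windowing trick VITS uses to train the waveform decoder on short latent slices: the same slice indices, scaled by the hop length, cut out the matching audio segment. A usage sketch with illustrative shapes (hop_length, segment_size, and the tensor sizes are toy values, and slice_segments is assumed to take (x, ids_str, segment_size) like its companion in the commons section):

import torch

from nemo.collections.tts.modules.vits_modules import rand_slice_segments, slice_segments

hop_length, segment_size = 256, 32
z = torch.randn(2, 192, 400)             # [b, d, t] latent frames
y = torch.randn(2, 1, 400 * hop_length)  # matching waveform
z_lengths = torch.tensor([400, 350])

z_slice, ids_slice = rand_slice_segments(z, z_lengths, segment_size)               # random latent windows
y_slice = slice_segments(y, ids_slice * hop_length, segment_size * hop_length)     # aligned audio windows
assert z_slice.shape == (2, 192, segment_size)
assert y_slice.shape == (2, 1, segment_size * hop_length)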
-import numpy as np import math import numpy as np import torch from torch import nn -from torch.nn import Conv1d, ConvTranspose1d, Conv2d +from torch.nn import Conv1d, Conv2d, ConvTranspose1d from torch.nn import functional as F -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm +from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm from nemo.collections.tts.modules.monotonic_align import maximum_path from nemo.collections.tts.modules.vits_mel_processing import librosa_mel_fn, spectral_normalize_torch - LRELU_SLOPE = 0.1 diff --git a/setup.py b/setup.py index 57af56c1e594..a19ecb7efee4 100644 --- a/setup.py +++ b/setup.py @@ -113,11 +113,11 @@ def req_file(filename, folder="requirements"): # Monotonic Align # # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # -setup( - name = 'monotonic_align', - ext_modules = cythonize("nemo/collections/tts/modules/monotonic_align/core.pyx"), - include_dirs=[numpy.get_include()] -) +# setup( +# name = 'monotonic_align', +# ext_modules = cythonize("nemo/collections/tts/modules/monotonic_align/core.pyx"), +# include_dirs=[numpy.get_include()] +# ) ############################################################################### # Code style checkers # From 1160fe7771fbb1b26efdb2c9a7d6436d7dec0c3f Mon Sep 17 00:00:00 2001 From: Oktai Tatanov Date: Fri, 4 Feb 2022 11:03:29 +0300 Subject: [PATCH 066/244] rename README Signed-off-by: Oktai Tatanov --- nemo/collections/tts/torch/{readme.md => README.md} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename nemo/collections/tts/torch/{readme.md => README.md} (96%) diff --git a/nemo/collections/tts/torch/readme.md b/nemo/collections/tts/torch/README.md similarity index 96% rename from nemo/collections/tts/torch/readme.md rename to nemo/collections/tts/torch/README.md index 89ec67fcbbf4..90b1644cba8e 100644 --- a/nemo/collections/tts/torch/readme.md +++ b/nemo/collections/tts/torch/README.md @@ -42,7 +42,7 @@ dataloader = torch.utils.data.DataLoader(dataset, 1, collate_fn=dataset._collate pitch_list = [] for batch in tqdm(dataloader, total=len(dataloader)): - tokens, tokens_lengths, audios, audio_lengths, pitches, pitches_lengths = batch + audios, audio_lengths, tokens, tokens_lengths, pitches, pitches_lengths = batch pitch = pitches.squeeze(0) pitch_list.append(pitch[pitch != 0]) From 00ca79e94a78648f041c5a2409f4fd875e5837d0 Mon Sep 17 00:00:00 2001 From: Oktai Tatanov Date: Fri, 4 Feb 2022 12:06:42 +0300 Subject: [PATCH 067/244] fix style without vits_modules Signed-off-by: Oktai Tatanov --- examples/tts/vits.py | 1 - nemo/collections/tts/models/vits.py | 8 +- .../tts/modules/monotonic_align/__init__.py | 18 +-- .../tts/modules/vits_mel_processing.py | 124 ------------------ .../tts/ljspeech/get_data.py | 7 - setup.py | 18 +-- 6 files changed, 23 insertions(+), 153 deletions(-) delete mode 100644 nemo/collections/tts/modules/vits_mel_processing.py diff --git a/examples/tts/vits.py b/examples/tts/vits.py index 2ef2404ace05..64937b9219b5 100644 --- a/examples/tts/vits.py +++ b/examples/tts/vits.py @@ -14,7 +14,6 @@ import pytorch_lightning as pl from pytorch_lightning.plugins.precision.native_amp import NativeMixedPrecisionPlugin - from torch.cuda.amp import GradScaler from nemo.collections.common.callbacks import LogEpochTimeCallback diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 92f2be462ebe..4ec009ed3479 100644 --- a/nemo/collections/tts/models/vits.py +++ 
b/nemo/collections/tts/models/vits.py @@ -14,25 +14,25 @@ import omegaconf import torch +import wandb from hydra.utils import instantiate from omegaconf import DictConfig from pytorch_lightning import Trainer from pytorch_lightning.loggers import WandbLogger from torch.cuda.amp import autocast from torch.nn import functional as F -import wandb from nemo.collections.tts.helpers.helpers import plot_spectrogram_to_numpy from nemo.collections.tts.losses.hifigan_losses import DiscriminatorLoss, FeatureMatchingLoss, GeneratorLoss from nemo.collections.tts.losses.vits_losses import KlLoss from nemo.collections.tts.models.base import TextToWaveform from nemo.collections.tts.modules.vits_modules import ( - SynthesizerTrn, MultiPeriodDiscriminator, + SynthesizerTrn, audio_to_mel_torch, - spec_to_mel_torch, - slice_segments, clip_grad_value_, + slice_segments, + spec_to_mel_torch, ) from nemo.core.classes.common import PretrainedModelInfo from nemo.utils import logging, model_utils diff --git a/nemo/collections/tts/modules/monotonic_align/__init__.py b/nemo/collections/tts/modules/monotonic_align/__init__.py index 4bd9dbd46c8a..2e2104ef4733 100644 --- a/nemo/collections/tts/modules/monotonic_align/__init__.py +++ b/nemo/collections/tts/modules/monotonic_align/__init__.py @@ -41,16 +41,16 @@ def maximum_path(neg_cent, mask): - """ Cython optimized version. + """ Cython optimized version. neg_cent: [b, t_t, t_s] mask: [b, t_t, t_s] """ - device = neg_cent.device - dtype = neg_cent.dtype - neg_cent = neg_cent.data.cpu().numpy().astype(np.float32) - path = np.zeros(neg_cent.shape, dtype=np.int32) + device = neg_cent.device + dtype = neg_cent.dtype + neg_cent = neg_cent.data.cpu().numpy().astype(np.float32) + path = np.zeros(neg_cent.shape, dtype=np.int32) - t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32) - t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32) - maximum_path_c(path, neg_cent, t_t_max, t_s_max) - return torch.from_numpy(path).to(device=device, dtype=dtype) + t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32) + t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32) + maximum_path_c(path, neg_cent, t_t_max, t_s_max) + return torch.from_numpy(path).to(device=device, dtype=dtype) diff --git a/nemo/collections/tts/modules/vits_mel_processing.py b/nemo/collections/tts/modules/vits_mel_processing.py deleted file mode 100644 index 0858228b8611..000000000000 --- a/nemo/collections/tts/modules/vits_mel_processing.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
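The maximum_path wrapper reindented above returns the hard monotonic alignment between text tokens and spectrogram frames that lets VITS train without an external aligner. A toy invocation, hypothetical and not part of the patch, following the shape convention in its docstring; it assumes the Cython extension is built (or that the numba port added later in this series is swapped in):

import torch

from nemo.collections.tts.modules.monotonic_align import maximum_path

b, t_spec, t_text = 1, 6, 3
neg_cent = torch.randn(b, t_spec, t_text)  # score of frame i under token j's prior
mask = torch.ones(b, t_spec, t_text)       # all positions valid in this toy case
path = maximum_path(neg_cent, mask)        # hard 0/1 alignment, same shape as neg_cent
assert path.sum() == t_spec                # each frame is assigned to exactly one token
durations = path.sum(1)                    # [b, t_text]: frames per token, the duration-predictor target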
- -# MIT License -# -# Copyright (c) 2021 Jaehyeon Kim -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -import torch -import torch.utils.data -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - if torch.min(y) < -1.0: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.0: - print('max value is ', torch.max(y)) - - global hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad( - y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode='reflect' - ) - y = y.squeeze(1) - - spec = torch.stft( - y, - n_fft, - hop_length=hop_size, - win_length=win_size, - window=hann_window[wnsize_dtype_device], - center=center, - pad_mode='reflect', - normalized=False, - onesided=True, - ) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global mel_basis - dtype_device = str(spec.dtype) + '_' + str(spec.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def audio_to_mel_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - spec = spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center) - melspec = spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, 
fmax) - return melspec diff --git a/scripts/dataset_processing/tts/ljspeech/get_data.py b/scripts/dataset_processing/tts/ljspeech/get_data.py index 96dde3ff1988..1f9ee2d8b71e 100644 --- a/scripts/dataset_processing/tts/ljspeech/get_data.py +++ b/scripts/dataset_processing/tts/ljspeech/get_data.py @@ -20,13 +20,6 @@ import sox import wget -<<<<<<< HEAD - -import sys -# sys.path.append("/nemo_text_processing/text_normalization/") -# sys.path.insert(0, '/nemo_text_processing/text_normalization/normalize/') -======= ->>>>>>> main from nemo_text_processing.text_normalization.normalize import Normalizer from tqdm import tqdm diff --git a/setup.py b/setup.py index d4303e9fad3c..60dade153115 100644 --- a/setup.py +++ b/setup.py @@ -25,9 +25,10 @@ from itertools import chain import importlib.util -from distutils.core import setup -from Cython.Build import cythonize -import numpy +# TODO: need to discuss how to do it correctly +# from distutils.core import setup +# from Cython.Build import cythonize +# import numpy import setuptools @@ -113,11 +114,12 @@ def req_file(filename, folder="requirements"): # Monotonic Align # # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # -setup( - name='monotonic_align', - ext_modules=cythonize("nemo/collections/tts/modules/monotonic_align/core.pyx"), - include_dirs=[numpy.get_include()], -) +# TODO: need to discuss how to do it correctly +# setup( +# name='monotonic_align', +# ext_modules=cythonize("nemo/collections/tts/modules/monotonic_align/core.pyx"), +# include_dirs=[numpy.get_include()], +# ) ############################################################################### # Code style checkers # From 70252704aeae95c07c13f40b21d5046867b64b59 Mon Sep 17 00:00:00 2001 From: Oktai Tatanov Date: Fri, 4 Feb 2022 12:37:45 +0300 Subject: [PATCH 068/244] add numba code, fix style and add todos Signed-off-by: Oktai Tatanov --- nemo/collections/tts/models/vits.py | 6 ++- .../tts/modules/monotonic_align/__init__.py | 2 + .../tts/modules/monotonic_align/numba_core.py | 52 +++++++++++++++++++ nemo/collections/tts/modules/vits_modules.py | 2 +- 4 files changed, 59 insertions(+), 3 deletions(-) create mode 100644 nemo/collections/tts/modules/monotonic_align/numba_core.py diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 4ec009ed3479..e8ef483cc7fc 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -84,7 +84,8 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): self.win_length = cfg.train_ds.dataset.win_length # TODO: need to add SynthesizerTrn in config - # TODO: how model knows padding idx? num tokens? + # TODO: how model knows padding idx? Need to use self.tokenizer_pad + # TODO: n_vocab != cfg.symbols_embedding_dim. 
Need replace cfg.symbols_embedding_dim with num_tokens self.net_g = SynthesizerTrn( n_vocab=cfg.symbols_embedding_dim, spec_channels=cfg.train_ds.dataset.n_fft // 2 + 1, @@ -176,7 +177,7 @@ def configure_optimizers(self): return [optim_g, optim_d], [scheduler_g_dict, scheduler_d_dict] def forward(self, batch, batch_idx): - # TODO: Check if this is correct + # TODO: Check if this is correct forward with torch.no_grad(): (x, x_lengths, spec, spec_lengths, y, y_lengths) = batch @@ -263,6 +264,7 @@ def training_step(self, batch, batch_idx): optim_g.zero_grad() self.manual_backward(loss_gen_all) optim_g.step() + # TODO: maybe change it to PTL-based function norm_g = clip_grad_value_(self.net_g.parameters(), None) schedulers = self.lr_schedulers() diff --git a/nemo/collections/tts/modules/monotonic_align/__init__.py b/nemo/collections/tts/modules/monotonic_align/__init__.py index 2e2104ef4733..3076ae137997 100644 --- a/nemo/collections/tts/modules/monotonic_align/__init__.py +++ b/nemo/collections/tts/modules/monotonic_align/__init__.py @@ -37,7 +37,9 @@ import numpy as np import torch +# TODO: try to use numbda_core instead of C++ core from .core import maximum_path_c +# from .numba_code import maximum_path_c def maximum_path(neg_cent, mask): diff --git a/nemo/collections/tts/modules/monotonic_align/numba_core.py b/nemo/collections/tts/modules/monotonic_align/numba_core.py new file mode 100644 index 000000000000..1d23c232b572 --- /dev/null +++ b/nemo/collections/tts/modules/monotonic_align/numba_core.py @@ -0,0 +1,52 @@ +import numba + + +@numba.jit(nopython=True, boundscheck=False, parallel=True) +def maximum_path_each(path, value, t_y: int, t_x: int, max_neg_val=-1e9): + """ + Args: + path: int32[:, :] + value: float32[:, :] + t_y: int + t_x: int + max_neg_val: float + """ + index: int = t_x - 1 + + for y in range(t_y): + for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): + if x == y: + v_cur = max_neg_val + else: + v_cur = value[y - 1, x] + if x == 0: + if y == 0: + v_prev = 0.0 + else: + v_prev = max_neg_val + else: + v_prev = value[y - 1, x - 1] + value[y, x] += max(v_prev, v_cur) + + for y in range(t_y - 1, -1, -1): + path[y, index] = 1 + if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]): + index = index - 1 + + +@numba.jit(nopython=True, boundscheck=False, parallel=True) +def maximum_path_c(paths, values, t_ys, t_xs): + """ + Args: + paths: int32[:, :, :] + values: float32[:, :, :] + t_ys: int[:] + t_xs: int[:] + """ + b: int = paths.shape[0] + for i in numba.prange(b): + maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) + + +if __name__ == '__main__': + pass \ No newline at end of file diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index 60ae74f0e9ac..113d95c24d76 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -574,7 +574,7 @@ def __init__(self, self.kernel_size = kernel_size self.p_dropout = p_dropout - # TODO: specify padding idx + # TODO: add padding idx in __init__, specify padding idx in self.emb self.emb = nn.Embedding(n_vocab, hidden_channels) nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) From 3d17d6f9ef45dba807a5303a766acafbceb61121 Mon Sep 17 00:00:00 2001 From: treacker Date: Mon, 7 Feb 2022 02:31:35 -0800 Subject: [PATCH 069/244] small fix --- examples/tts/conf/vits.yaml | 12 ++++++------ nemo/collections/tts/models/vits.py | 7 ++++--- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git 
a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index e9da99edfaaa..70fd29669bc9 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -250,24 +250,24 @@ trainer: accelerator: gpu strategy: ddp precision: 16 - max_epochs: 1000 + max_epochs: 1000000 accumulate_grad_batches: 1 # gradient_clip_val: 1000.0 checkpoint_callback: false # Provided by exp_manager logger: false # Provided by exp_manager - log_every_n_steps: 10 + log_every_n_steps: 20 flush_logs_every_n_steps: 1000 - check_val_every_n_epoch: 5 + check_val_every_n_epoch: 2 exp_manager: exp_dir: first_run_vits name: ${name} - create_tensorboard_logger: true + create_tensorboard_logger: false create_checkpoint_callback: true checkpoint_callback_params: - monitor: val_mel_loss + monitor: loss_gen_all mode: min - create_wandb_logger: false + create_wandb_logger: true wandb_logger_kwargs: name: ${name} project: VITS diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index ca7c6adce2ee..eaecd5d8116d 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -354,7 +354,8 @@ def validation_step(self, batch, batch_idx): y_hat_mel, y_hat_mel_lengths = self.audio_to_melspec_precessor(y_hat, y_hat_lengths) # plot audio once per epoch - if batch_idx == 0 and self.logger is not None and isinstance(self.logger, WandbLogger): + if batch_idx == 0 and self.logger is not None:# and isinstance(self.logger, WandbLogger): + print('logging media') specs = [] audios = [] @@ -371,12 +372,12 @@ def validation_step(self, batch, batch_idx): audios += [ wandb.Audio( - y[0, : y_lengths[0]].data.cpu().numpy(), + y[0, : y_lengths[0]].data.cpu().to(torch.float).numpy(), caption=f"val_wav_target", sample_rate=self.sample_rate, ), wandb.Audio( - y_hat[0, : y_hat_lengths[0]].data.cpu().numpy(), + y_hat[0, : y_hat_lengths[0]].data.cpu().to(torch.float).numpy(), caption=f"val_wav_predicted", sample_rate=self.sample_rate, ), From 9700bbe085b54d4f908dd29adc39a149b5eb2f0d Mon Sep 17 00:00:00 2001 From: treacker Date: Mon, 7 Feb 2022 11:38:31 -0800 Subject: [PATCH 070/244] fix some todos --- examples/tts/conf/vits.yaml | 5 ++- nemo/collections/tts/models/vits.py | 34 ++++++++------------ nemo/collections/tts/modules/vits_modules.py | 2 +- 3 files changed, 16 insertions(+), 25 deletions(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index 30b3be04700b..83d86a7303ae 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -157,13 +157,12 @@ model: dataloader_params: drop_last: false - shuffle: true + shuffle: false batch_size: 16 - num_workers: 1 + num_workers: 4 pin_memory: false preprocessor: - # TODO: change to STFT _target_: nemo.collections.asr.parts.preprocessing.features.FilterbankFeatures nfilt: ${model.n_mel_channels} highfreq: ${model.highfreq} diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index ec736b88c709..3c6542a7e2cf 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -12,6 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
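The training-loop changes in the next hunks all build on Lightning's manual-optimization pattern for GANs: the discriminator is updated first on detached generator output, then the generator is updated on the adversarial, feature-matching, mel, duration, and KL terms. A minimal skeleton of that ordering; self.optimizers() and self.manual_backward() are the real Lightning APIs, while the helper names are illustrative placeholders, not the patch's actual code:

def training_step(self, batch, batch_idx):
    # requires self.automatic_optimization = False in __init__
    optim_g, optim_d = self.optimizers()
    y, y_hat = self._real_and_generated(batch)   # hypothetical helper

    # 1) discriminator step: real audio vs. detached fake audio
    loss_d = self._disc_loss(y, y_hat.detach())  # hypothetical helper
    optim_d.zero_grad()
    self.manual_backward(loss_d)
    optim_d.step()

    # 2) generator step: gradients flow through the same fake audio
    loss_g = self._gen_loss(y, y_hat)            # hypothetical helper
    optim_g.zero_grad()
    self.manual_backward(loss_g)
    optim_g.step()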
+from nemo.core import typecheck + +# typecheck.set_typecheck_enabled(False) + import omegaconf import torch import wandb @@ -37,12 +41,6 @@ from nemo.core.classes.common import PretrainedModelInfo from nemo.utils import logging, model_utils -# TODO: remove if not needed -# to call optimizer_step -# def closure(): -# return - - class VitsModel(TextToWaveform): def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): # Convert to Hydra 1.0 compatible DictConfig @@ -85,9 +83,8 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): # TODO: need to add SynthesizerTrn in config # TODO: how model knows padding idx? Need to use self.tokenizer_pad - # TODO: n_vocab != cfg.symbols_embedding_dim. Need replace cfg.symbols_embedding_dim with num_tokens self.net_g = SynthesizerTrn( - n_vocab=cfg.symbols_embedding_dim, + n_vocab=num_tokens, spec_channels=cfg.train_ds.dataset.n_fft // 2 + 1, segment_size=cfg.segment_size // cfg.train_ds.dataset.hop_length, inter_channels=cfg.inter_channels, @@ -123,9 +120,6 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): window=window_fn(self.win_length, periodic=False).to(torch.float) if window_fn else None, ) - # TODO: remove if not needed - # self.precision_plugin = self.trainer.accelerator.precision_plugin # to call optimizer_step - def _setup_normalizer(self, cfg): if "text_normalizer" in cfg: normalizer_kwargs = {} @@ -167,17 +161,18 @@ def configure_optimizers(self): optim_g = torch.optim.AdamW(self.net_g.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps) optim_d = torch.optim.AdamW(self.net_d.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps) - scheduler_g = torch.optim.lr_scheduler.ExponentialLR(self.optim_g, gamma=self._cfg.lr_decay) + scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=self._cfg.lr_decay) scheduler_g_dict = { 'scheduler': scheduler_g, 'interval': 'step', } - scheduler_d = torch.optim.lr_scheduler.ExponentialLR(self.optim_d, gamma=self._cfg.lr_decay) + scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=self._cfg.lr_decay) scheduler_d_dict = {'scheduler': scheduler_d, 'interval': 'step'} return [optim_g, optim_d], [scheduler_g_dict, scheduler_d_dict] def forward(self, batch, batch_idx): # TODO: Check if this is correct forward + # only for inference with torch.no_grad(): (x, x_lengths, spec, spec_lengths, y, y_lengths) = batch @@ -236,7 +231,8 @@ def training_step(self, batch, batch_idx): y_d_hat_r, y_d_hat_g, _, _ = self.net_d(y, y_hat.detach()) with autocast(enabled=False): - loss_disc, losses_disc_r, losses_disc_g = self.disc_loss(y_d_hat_r, y_d_hat_g) + loss_disc, losses_disc_r, losses_disc_g = self.disc_loss(disc_real_outputs=y_d_hat_r, + disc_generated_outputs=y_d_hat_g) loss_disc_all = loss_disc # get optimizers @@ -252,11 +248,12 @@ def training_step(self, batch, batch_idx): with autocast(enabled=True): # Generator y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = self.net_d(y, y_hat) + with autocast(enabled=False): loss_dur = torch.sum(l_length.float()) loss_mel = F.l1_loss(y_mel, y_hat_mel) * self._cfg.c_mel loss_kl = self.kl_loss(z_p=z_p, logs_q=logs_q, m_p=m_p, logs_p=logs_p, z_mask=z_mask) * self._cfg.c_kl - loss_fm = self.feat_matching_loss(fmap_r=fmap_r.detach(), fmap_g=fmap_g) + loss_fm = self.feat_matching_loss(fmap_r=fmap_r, fmap_g=fmap_g) loss_gen, losses_gen = self.gen_loss(disc_outputs=y_d_hat_g) loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl @@ -308,8 +305,7 @@ def validation_step(self, batch, batch_idx): 
y_hat_mel, y_hat_mel_lengths = self.audio_to_melspec_precessor(y_hat, y_hat_lengths) # plot audio once per epoch - if batch_idx == 0 and self.logger is not None:# and isinstance(self.logger, WandbLogger): - print('logging media') + if batch_idx == 0 and self.logger is not None and isinstance(self.logger, WandbLogger): specs = [] audios = [] @@ -325,13 +321,9 @@ def validation_step(self, batch, batch_idx): audios += [ wandb.Audio( -<<<<<<< HEAD y[0, : y_lengths[0]].data.cpu().to(torch.float).numpy(), caption=f"val_wav_target", sample_rate=self.sample_rate, -======= - y[0, : y_lengths[0]].data.cpu().numpy(), caption=f"val_wav_target", sample_rate=self.sample_rate, ->>>>>>> 70252704aeae95c07c13f40b21d5046867b64b59 ), wandb.Audio( y_hat[0, : y_hat_lengths[0]].data.cpu().to(torch.float).numpy(), diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index 113d95c24d76..008ba1140d30 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -944,7 +944,7 @@ def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_sca o = self.dec((z * y_mask)[:,:,:max_len], g=g) return o, attn, y_mask, (z, z_p, m_p, logs_p) - # TODO: do we really need it? + # TODO: do we really need it? Can be used for emotions conversion def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): assert self.n_speakers > 0, "n_speakers have to be larger than 0." g_src = self.emb_g(sid_src).unsqueeze(-1) From 24df2c50d10d75b925908ff098b04d94d9b82b2b Mon Sep 17 00:00:00 2001 From: treacker Date: Mon, 14 Feb 2022 03:01:05 -0800 Subject: [PATCH 071/244] added numba mas --- nemo/collections/tts/modules/monotonic_align/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nemo/collections/tts/modules/monotonic_align/__init__.py b/nemo/collections/tts/modules/monotonic_align/__init__.py index 3076ae137997..8b8fe78c7d2f 100644 --- a/nemo/collections/tts/modules/monotonic_align/__init__.py +++ b/nemo/collections/tts/modules/monotonic_align/__init__.py @@ -37,9 +37,9 @@ import numpy as np import torch -# TODO: try to use numbda_core instead of C++ core +# TODO: try to use numbda_core instead of C++ core (done) +# from .numba_core import maximum_path_c from .core import maximum_path_c -# from .numba_code import maximum_path_c def maximum_path(neg_cent, mask): From 35b0cd494313e7b8df3a53e199fa23dc2d2d464e Mon Sep 17 00:00:00 2001 From: treacker Date: Mon, 14 Feb 2022 03:05:39 -0800 Subject: [PATCH 072/244] added DDP sampler --- examples/tts/conf/vits.yaml | 33 ++++--- examples/tts/vits.py | 4 +- nemo/collections/tts/helpers/helpers.py | 98 ++++++++++++++++++++ nemo/collections/tts/models/vits.py | 30 ++++-- nemo/collections/tts/modules/vits_modules.py | 10 +- nemo/collections/tts/torch/data.py | 6 +- 6 files changed, 156 insertions(+), 25 deletions(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index 83d86a7303ae..0fcc8e56a778 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -6,8 +6,9 @@ name: VITS -train_dataset: "ljspeech_ds/LJSpeech-1.1/train_manifest.json" -validation_datasets: "ljspeech_ds/LJSpeech-1.1/val_manifest.json" + +train_dataset: "../datasets/ljspeech_ds/LJSpeech-1.1/train_manifest.json" +validation_datasets: "../datasets/ljspeech_ds/LJSpeech-1.1/val_manifest.json" sup_data_path: null sup_data_types: null @@ -125,14 +126,21 @@ model: pitch_fmin: ${model.pitch_fmin} pitch_fmax: ${model.pitch_fmax} - + dataloader_params: - 
drop_last: false - shuffle: true - batch_size: 16 + # drop_last: false + # shuffle: true + # batch_size: 32 num_workers: 4 pin_memory: false + batch_sampler: + batch_size: 32 + boundaries: [32,300,400,500,600,700,800,900,1000] + num_replicas: ${trainer.devices} + shuffle: true + + validation_ds: dataset: _target_: "nemo.collections.tts.torch.data.TTSDataset" @@ -154,7 +162,6 @@ model: pitch_fmin: ${model.pitch_fmin} pitch_fmax: ${model.pitch_fmax} - dataloader_params: drop_last: false shuffle: false @@ -199,7 +206,7 @@ model: trainer: num_nodes: 1 - devices: 4 + devices: 2 accelerator: gpu strategy: ddp precision: 16 @@ -213,17 +220,17 @@ trainer: check_val_every_n_epoch: 2 exp_manager: - exp_dir: first_run_vits + exp_dir: ../local_exps/vits_ddp name: ${name} - create_tensorboard_logger: false + create_tensorboard_logger: true create_checkpoint_callback: true checkpoint_callback_params: monitor: loss_gen_all mode: min - create_wandb_logger: true + create_wandb_logger: false wandb_logger_kwargs: - name: ${name} - project: VITS + name: VITS_chars + project: ${name} entity: treacker resume_if_exists: false resume_ignore_no_checkpoint: false diff --git a/examples/tts/vits.py b/examples/tts/vits.py index 64937b9219b5..72cb7c33de8c 100644 --- a/examples/tts/vits.py +++ b/examples/tts/vits.py @@ -22,6 +22,7 @@ from nemo.utils.exp_manager import exp_manager + @hydra_runner(config_path="conf", config_name="vits") def main(cfg): plugins = [] @@ -29,7 +30,8 @@ def main(cfg): scaler = GradScaler(enabled=True) plugins.append(NativeMixedPrecisionPlugin(precision=cfg.trainer.precision, device='cuda', scaler=scaler)) - trainer = pl.Trainer(plugins=plugins, **cfg.trainer) + trainer = pl.Trainer(plugins=plugins, replace_sampler_ddp=False, **cfg.trainer) + # trainer = pl.Trainer(plugins=plugins, **cfg.trainer) exp_manager(trainer, cfg.get("exp_manager", None)) model = VitsModel(cfg=cfg.model, trainer=trainer) trainer.callbacks.extend([pl.callbacks.LearningRateMonitor(), LogEpochTimeCallback()]) diff --git a/nemo/collections/tts/helpers/helpers.py b/nemo/collections/tts/helpers/helpers.py index 73d6562fb44a..2f2700fcc6aa 100644 --- a/nemo/collections/tts/helpers/helpers.py +++ b/nemo/collections/tts/helpers/helpers.py @@ -492,3 +492,101 @@ def split_view(tensor, split_size, dim=0): cur_shape = tensor.shape new_shape = cur_shape[:dim] + (tensor.shape[dim] // split_size, split_size) + cur_shape[dim + 1 :] return tensor.reshape(*new_shape) + +class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler): + """ + Maintain similar input lengths in a batch. + Length groups are specified by boundaries. + Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}. + + It removes samples which are not included in the boundaries. + Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded. 
+ """ + def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True): + super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle) + self.lengths = dataset.lengths + self.batch_size = batch_size + self.boundaries = boundaries + + self.buckets, self.num_samples_per_bucket = self._create_buckets() + self.total_size = sum(self.num_samples_per_bucket) + self.num_samples = self.total_size // self.num_replicas + + def _create_buckets(self): + buckets = [[] for _ in range(len(self.boundaries) - 1)] + for i in range(len(self.lengths)): + length = self.lengths[i] + idx_bucket = self._bisect(length) + if idx_bucket != -1: + buckets[idx_bucket].append(i) + + for i in range(len(buckets) - 1, 0, -1): + if len(buckets[i]) == 0: + buckets.pop(i) + self.boundaries.pop(i+1) + + num_samples_per_bucket = [] + for i in range(len(buckets)): + len_bucket = len(buckets[i]) + total_batch_size = self.num_replicas * self.batch_size + rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size + num_samples_per_bucket.append(len_bucket + rem) + return buckets, num_samples_per_bucket + + def __iter__(self): + # deterministically shuffle based on epoch + g = torch.Generator() + g.manual_seed(self.epoch) + + indices = [] + if self.shuffle: + for bucket in self.buckets: + indices.append(torch.randperm(len(bucket), generator=g).tolist()) + else: + for bucket in self.buckets: + indices.append(list(range(len(bucket)))) + + batches = [] + for i in range(len(self.buckets)): + bucket = self.buckets[i] + len_bucket = len(bucket) + ids_bucket = indices[i] + num_samples_bucket = self.num_samples_per_bucket[i] + + # add extra samples to make it evenly divisible + rem = num_samples_bucket - len_bucket + ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)] + + # subsample + ids_bucket = ids_bucket[self.rank::self.num_replicas] + + # batching + for j in range(len(ids_bucket) // self.batch_size): + batch = [bucket[idx] for idx in ids_bucket[j*self.batch_size:(j+1)*self.batch_size]] + batches.append(batch) + + if self.shuffle: + batch_ids = torch.randperm(len(batches), generator=g).tolist() + batches = [batches[i] for i in batch_ids] + self.batches = batches + + assert len(self.batches) * self.batch_size == self.num_samples + return iter(self.batches) + + def _bisect(self, x, lo=0, hi=None): + if hi is None: + hi = len(self.boundaries) - 1 + + if hi > lo: + mid = (hi + lo) // 2 + if self.boundaries[mid] < x and x <= self.boundaries[mid+1]: + return mid + elif x <= self.boundaries[mid]: + return self._bisect(x, lo, mid) + else: + return self._bisect(x, mid + 1, hi) + else: + return -1 + + def __len__(self): + return self.num_samples // self.batch_size \ No newline at end of file diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 3c6542a7e2cf..00b2bdd41f36 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -26,9 +26,9 @@ from torch.cuda.amp import autocast from torch.nn import functional as F -from nemo.collections.tts.helpers.helpers import plot_spectrogram_to_numpy -from nemo.collections.tts.losses.hifigan_losses import DiscriminatorLoss, FeatureMatchingLoss, GeneratorLoss -from nemo.collections.tts.losses.vits_losses import KlLoss +from nemo.collections.tts.helpers.helpers import plot_spectrogram_to_numpy, DistributedBucketSampler +from nemo.collections.tts.losses.hifigan_losses import DiscriminatorLoss, GeneratorLoss +from 
nemo.collections.tts.losses.vits_losses import KlLoss, FeatureMatchingLoss from nemo.collections.tts.models.base import TextToWaveform from nemo.collections.tts.modules.vits_modules import ( MultiPeriodDiscriminator, @@ -44,6 +44,7 @@ class VitsModel(TextToWaveform): def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): # Convert to Hydra 1.0 compatible DictConfig + cfg = model_utils.convert_model_config_to_dict_config(cfg) cfg = model_utils.maybe_update_config_version(cfg) @@ -66,7 +67,6 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): self.audio_to_melspec_precessor = instantiate(cfg.preprocessor, highfreq=cfg.train_ds.dataset.highfreq) - self.multiperioddisc = MultiPeriodDiscriminator() self.feat_matching_loss = FeatureMatchingLoss() self.disc_loss = DiscriminatorLoss() self.gen_loss = GeneratorLoss() @@ -82,7 +82,6 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): self.win_length = cfg.train_ds.dataset.win_length # TODO: need to add SynthesizerTrn in config - # TODO: how model knows padding idx? Need to use self.tokenizer_pad self.net_g = SynthesizerTrn( n_vocab=num_tokens, spec_channels=cfg.train_ds.dataset.n_fft // 2 + 1, @@ -94,6 +93,7 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): n_layers=cfg.n_layers, kernel_size=cfg.pitch_embedding_kernel_size, p_dropout=cfg.p_dropout, + padding_idx=self.tokenizer_pad, resblock=cfg.generator.resblock, resblock_kernel_sizes=cfg.generator.resblock_kernel_sizes, resblock_dilation_sizes=cfg.generator.resblock_dilation_sizes, @@ -248,7 +248,6 @@ def training_step(self, batch, batch_idx): with autocast(enabled=True): # Generator y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = self.net_d(y, y_hat) - with autocast(enabled=False): loss_dur = torch.sum(l_length.float()) loss_mel = F.l1_loss(y_mel, y_hat_mel) * self._cfg.c_mel @@ -352,6 +351,25 @@ def _loader(self, cfg): dataset=dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params, ) + def train_dataloader(self): + # default used by the Trainer + dataset = instantiate( + self.cfg.train_ds.dataset, + text_normalizer=self.normalizer, + text_normalizer_call_kwargs=self.text_normalizer_call_kwargs, + text_tokenizer=self.tokenizer, + ) + + train_sampler = DistributedBucketSampler( + dataset, + self.cfg.train_ds.batch_sampler.batch_size, + [32,300,400,500,600,700,800,900,1000], + shuffle=True) + dataloader = torch.utils.data.DataLoader(dataset, collate_fn=dataset.collate_fn, batch_sampler=train_sampler, + **self.cfg.train_ds.dataloader_params,) + print('made ddp loader') + return dataloader + def setup_training_data(self, cfg): self._train_dl = self._loader(cfg) diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index 008ba1140d30..3aa6edd4f01f 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -563,7 +563,8 @@ def __init__(self, n_heads, n_layers, kernel_size, - p_dropout): + p_dropout, + padding_idx): super().__init__() self.n_vocab = n_vocab self.out_channels = out_channels @@ -575,7 +576,7 @@ def __init__(self, self.p_dropout = p_dropout # TODO: add padding idx in __init__, specify padding idx in self.emb - self.emb = nn.Embedding(n_vocab, hidden_channels) + self.emb = nn.Embedding(n_vocab, hidden_channels, padding_idx=padding_idx) nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) self.encoder = Encoder( @@ -825,6 +826,7 @@ def __init__(self, n_layers, kernel_size, p_dropout, + padding_idx, resblock, 
resblock_kernel_sizes, resblock_dilation_sizes, @@ -846,6 +848,7 @@ def __init__(self, self.n_layers = n_layers self.kernel_size = kernel_size self.p_dropout = p_dropout + self.padding_idx = padding_idx self.resblock = resblock self.resblock_kernel_sizes = resblock_kernel_sizes self.resblock_dilation_sizes = resblock_dilation_sizes @@ -865,7 +868,8 @@ def __init__(self, n_heads, n_layers, kernel_size, - p_dropout) + p_dropout, + padding_idx) self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) diff --git a/nemo/collections/tts/torch/data.py b/nemo/collections/tts/torch/data.py index 349867e4a143..42dcc854f410 100644 --- a/nemo/collections/tts/torch/data.py +++ b/nemo/collections/tts/torch/data.py @@ -15,6 +15,7 @@ import json import math +import os import pickle import random from pathlib import Path @@ -160,6 +161,7 @@ def __init__( if isinstance(manifest_filepath, str): manifest_filepath = [manifest_filepath] self.manifest_filepath = manifest_filepath + self.lengths = [] data = [] total_duration = 0 @@ -190,7 +192,7 @@ def __init__( file_info["text_tokens"] = self.text_tokenizer(item["normalized_text"]) data.append(file_info) - + self.lengths.append(os.path.getsize(item["audio_filepath"]) // (2 * hop_length)) if file_info["duration"] is None: logging.info( "Not all audio files have duration information. Duration logging will be disabled." @@ -222,7 +224,7 @@ def __init__( self.hop_len = self.hop_length or self.n_fft // 4 self.fb = torch.tensor( librosa.filters.mel( - self.sample_rate, self.n_fft, n_mels=self.n_mels, fmin=self.lowfreq, fmax=self.highfreq + sr=self.sample_rate, n_fft=self.n_fft, n_mels=self.n_mels, fmin=self.lowfreq, fmax=self.highfreq ), dtype=torch.float, ).unsqueeze(0) From 6e436d0f9f7249b4a30eff8faf4bb9c5d11f1881 Mon Sep 17 00:00:00 2001 From: treacker Date: Mon, 14 Feb 2022 03:06:05 -0800 Subject: [PATCH 073/244] specified versions --- reinstall.sh | 2 +- requirements/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/reinstall.sh b/reinstall.sh index 808f7685bc1f..73db085c2a78 100755 --- a/reinstall.sh +++ b/reinstall.sh @@ -17,7 +17,7 @@ ${PIP} uninstall -y nemo_tts ${PIP} uninstall -y nemo_simple_gan ${PIP} uninstall -y nemo_cv -${PIP} install -U setuptools +${PIP} install -U setuptools==59.5.0 echo 'Installing nemo and nemo_text_processing' if [[ "$INSTALL_OPTION" == "dev" ]]; then diff --git a/requirements/requirements.txt b/requirements/requirements.txt index 46007856b7e8..be912b83b39d 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -1,4 +1,4 @@ -numpy>=1.21 +numpy==1.21 onnx>=1.7.0 python-dateutil torch From 9f6ff8f0a9ee1ad29158dcf8d7d497a66a6c8ce2 Mon Sep 17 00:00:00 2001 From: treacker Date: Mon, 14 Feb 2022 03:06:30 -0800 Subject: [PATCH 074/244] fixed for new librosa version --- nemo/collections/asr/parts/preprocessing/features.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nemo/collections/asr/parts/preprocessing/features.py b/nemo/collections/asr/parts/preprocessing/features.py index c706f9d6362a..f9a488f3a8ac 100644 --- a/nemo/collections/asr/parts/preprocessing/features.py +++ 
b/nemo/collections/asr/parts/preprocessing/features.py @@ -311,7 +311,7 @@ def __init__( highfreq = highfreq or sample_rate / 2 filterbanks = torch.tensor( - librosa.filters.mel(sample_rate, self.n_fft, n_mels=nfilt, fmin=lowfreq, fmax=highfreq), dtype=torch.float + librosa.filters.mel(sr=sample_rate, n_fft=self.n_fft, n_mels=nfilt, fmin=lowfreq, fmax=highfreq), dtype=torch.float ).unsqueeze(0) self.register_buffer("fb", filterbanks) From 97cd6cc28d18bc576c2b7d56f3f4944fe723fa29 Mon Sep 17 00:00:00 2001 From: treacker Date: Mon, 14 Feb 2022 03:06:53 -0800 Subject: [PATCH 075/244] added feature loss --- nemo/collections/tts/losses/vits_losses.py | 30 ++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/nemo/collections/tts/losses/vits_losses.py b/nemo/collections/tts/losses/vits_losses.py index dc24ce249e64..388f0ebd0933 100644 --- a/nemo/collections/tts/losses/vits_losses.py +++ b/nemo/collections/tts/losses/vits_losses.py @@ -78,3 +78,33 @@ def forward(self, z_p, logs_q, m_p, logs_p, z_mask): kl = torch.sum(kl * z_mask) l = kl / torch.sum(z_mask) return l + +class FeatureMatchingLoss(Loss): + """VITS Feature Matching Loss module""" + + @property + def input_types(self): + return { + "fmap_r": [[NeuralType(elements_type=VoidType())]], + "fmap_g": [[NeuralType(elements_type=VoidType())]], + } + + @property + def output_types(self): + return { + "loss": NeuralType(elements_type=LossType()), + } + + @typecheck() + def forward(self, fmap_r, fmap_g): + """ + fmap_r, fmap_g: List[List[Tensor]] + """ + loss = 0 + for dr, dg in zip(fmap_r, fmap_g): + for rl, gl in zip(dr, dg): + rl = rl.float().detach() + gl = gl.float() + loss += torch.mean(torch.abs(rl - gl)) + + return loss * 2 \ No newline at end of file From 7c01dca1ebe8c16153485fc9c32a05a171f01e1c Mon Sep 17 00:00:00 2001 From: treacker Date: Mon, 21 Feb 2022 01:03:48 -0800 Subject: [PATCH 076/244] added IPA phonemizer --- examples/tts/conf/vits.yaml | 70 +++---- examples/tts/vits.py | 1 - nemo/collections/tts/models/vits.py | 2 +- .../tts/modules/monotonic_align/__init__.py | 6 +- nemo/collections/tts/torch/en_utils.py | 33 +++- nemo/collections/tts/torch/g2ps.py | 183 ++++++++++++++++++ nemo/collections/tts/torch/tts_tokenizers.py | 114 +++++++++++ 7 files changed, 359 insertions(+), 50 deletions(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index 0fcc8e56a778..a865bfaab40c 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -12,27 +12,6 @@ validation_datasets: "../datasets/ljspeech_ds/LJSpeech-1.1/val_manifest.json" sup_data_path: null sup_data_types: null -# Default values from librosa.pyin -pitch_fmin: 65.40639132514966 -pitch_fmax: 2093.004522404789 - -# LJSpeech stats (per frame), train -pitch_mean: 212.35873413085938 -pitch_std: 68.52806091308594 - -# default values for sample_rate=22050 -sample_rate: 22050 -n_mel_channels: 80 -n_window_size: 1024 -n_window_stride: 256 -n_fft: 1024 -lowfreq: 0 -highfreq: 8000 -window: hann - -pitch_loss_scale: 0.1 -durs_loss_scale: 0.1 -mel_loss_scale: 1.0 phoneme_dict_path: "scripts/tts_dataset_files/cmudict-0.7b_nv22.01" heteronyms_path: "scripts/tts_dataset_files/heteronyms-030921" @@ -42,17 +21,17 @@ filter_channels: 768 filter_length: 1024 model: - pitch_fmin: ${pitch_fmin} - pitch_fmax: ${pitch_fmax} - - sample_rate: ${sample_rate} - n_mel_channels: ${n_mel_channels} - n_window_size: ${n_window_size} - n_window_stride: ${n_window_stride} - n_fft: ${n_fft} - lowfreq: ${lowfreq} - highfreq: ${highfreq} - window: ${window} + 
pitch_fmin: 65.40639132514966 + pitch_fmax: 2093.004522404789 + + sample_rate: 22050 + n_mel_channels: 80 + n_window_size: 1024 + n_window_stride: 256 + n_fft: 1024 + lowfreq: 0 + highfreq: 8000 + window: hann splice_length: 64 lr: 2e-4 @@ -94,16 +73,21 @@ model: punct_post_process: true text_tokenizer: - _target_: nemo.collections.tts.torch.tts_tokenizers.EnglishPhonemesTokenizer + _target_: nemo.collections.tts.torch.tts_tokenizers.IPAPhonemesTokenizer punct: true stresses: true chars: true apostrophe: true pad_with_space: true - g2p: - _target_: nemo.collections.tts.torch.g2ps.EnglishG2p - phoneme_dict: ${phoneme_dict_path} - heteronyms: ${heteronyms_path} + g2p: null + # _target_: nemo.collections.tts.torch.g2ps.EnglishG2p + # phoneme_dict: ${phoneme_dict_path} + # heteronyms: ${heteronyms_path} + # text_tokenizer: + # _target_: nemo.collections.tts.torch.tts_tokenizers.EnglishCharsTokenizer + # punct: true + # apostrophe: true + # pad_with_space: true train_ds: dataset: @@ -135,7 +119,7 @@ model: pin_memory: false batch_sampler: - batch_size: 32 + batch_size: 128 boundaries: [32,300,400,500,600,700,800,900,1000] num_replicas: ${trainer.devices} shuffle: true @@ -206,7 +190,7 @@ model: trainer: num_nodes: 1 - devices: 2 + devices: 1 accelerator: gpu strategy: ddp precision: 16 @@ -215,12 +199,12 @@ trainer: # gradient_clip_val: 1000.0 checkpoint_callback: false # Provided by exp_manager logger: false # Provided by exp_manager - log_every_n_steps: 20 + log_every_n_steps: 50 flush_logs_every_n_steps: 1000 check_val_every_n_epoch: 2 exp_manager: - exp_dir: ../local_exps/vits_ddp + exp_dir: vits_char name: ${name} create_tensorboard_logger: true create_checkpoint_callback: true @@ -229,8 +213,8 @@ exp_manager: mode: min create_wandb_logger: false wandb_logger_kwargs: - name: VITS_chars + name: VITS_char project: ${name} - entity: treacker + entity: treacker15 resume_if_exists: false resume_ignore_no_checkpoint: false diff --git a/examples/tts/vits.py b/examples/tts/vits.py index 72cb7c33de8c..88996500786d 100644 --- a/examples/tts/vits.py +++ b/examples/tts/vits.py @@ -22,7 +22,6 @@ from nemo.utils.exp_manager import exp_manager - @hydra_runner(config_path="conf", config_name="vits") def main(cfg): plugins = [] diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 00b2bdd41f36..7e0bf8053b53 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -136,7 +136,7 @@ def _setup_normalizer(self, cfg): def _setup_tokenizer(self, cfg): text_tokenizer_kwargs = {} - if "g2p" in cfg.text_tokenizer: + if "g2p" in cfg.text_tokenizer and cfg.text_tokenizer.g2p is not None: g2p_kwargs = {} if "phoneme_dict" in cfg.text_tokenizer.g2p: diff --git a/nemo/collections/tts/modules/monotonic_align/__init__.py b/nemo/collections/tts/modules/monotonic_align/__init__.py index 8b8fe78c7d2f..4ab8442858bf 100644 --- a/nemo/collections/tts/modules/monotonic_align/__init__.py +++ b/nemo/collections/tts/modules/monotonic_align/__init__.py @@ -26,7 +26,6 @@ # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER @@ -37,9 +36,8 @@ import numpy as np import torch -# TODO: try to use numbda_core instead of C++ core (done) -# from .numba_core import maximum_path_c -from .core import maximum_path_c +from .numba_core import maximum_path_c +# from .core import maximum_path_c def maximum_path(neg_cent, mask): diff --git a/nemo/collections/tts/torch/en_utils.py b/nemo/collections/tts/torch/en_utils.py index 632030261356..e026baaa2874 100644 --- a/nemo/collections/tts/torch/en_utils.py +++ b/nemo/collections/tts/torch/en_utils.py @@ -24,6 +24,29 @@ } SYNOGLYPH2ASCII = {g: asc for asc, glyphs in _synoglyphs.items() for g in glyphs} +# List of (regular expression, replacement) pairs for abbreviations: +_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [ + ('mrs', 'misess'), + ('mr', 'mister'), + ('dr', 'doctor'), + ('st', 'saint'), + ('co', 'company'), + ('jr', 'junior'), + ('maj', 'major'), + ('gen', 'general'), + ('drs', 'doctors'), + ('rev', 'reverend'), + ('lt', 'lieutenant'), + ('hon', 'honorable'), + ('sgt', 'sergeant'), + ('capt', 'captain'), + ('esq', 'esquire'), + ('ltd', 'limited'), + ('col', 'colonel'), + ('ft', 'fort'), +]] + + # Example of parsing by groups via _WORDS_RE. # Groups: # 1st group -- valid english words, @@ -33,9 +56,14 @@ # config file must contain |EY1 EY1|, B, C, D, E, F, and G. # 111111311113111131111111322222222233133133133133133111313 _WORDS_RE = re.compile("([a-zA-Z]+(?:[a-zA-Z-']*[a-zA-Z]+)*)|(\|[^|]*\|)|([^a-zA-Z|]+)") +_whitespace_re = re.compile(r'\s+') +def expand_abbreviations(text): + for regex, replacement in _abbreviations: + text = re.sub(regex, replacement, text) + return text -def english_text_preprocessing(text, lower=True): +def english_text_preprocessing(text, lower=True, abbreviations=True): text = unicode(text) text = ''.join(char for char in unicodedata.normalize('NFD', text) if unicodedata.category(char) != 'Mn') text = ''.join(char if char not in SYNOGLYPH2ASCII else SYNOGLYPH2ASCII[char] for char in text) @@ -43,6 +71,9 @@ def english_text_preprocessing(text, lower=True): if lower: text = text.lower() + if abbreviations: + text = expand_abbreviations(text) + return text diff --git a/nemo/collections/tts/torch/g2ps.py b/nemo/collections/tts/torch/g2ps.py index ac9e59d09f87..594c66d99186 100644 --- a/nemo/collections/tts/torch/g2ps.py +++ b/nemo/collections/tts/torch/g2ps.py @@ -25,6 +25,9 @@ from nemo.utils.get_rank import is_global_rank_zero +_alt_re = re.compile(r'\([0-9]+\)') + + class BaseG2p(abc.ABC): def __init__( self, phoneme_dict=None, word_tokenize_func=lambda x: x, apply_to_oov_word=None, @@ -224,3 +227,183 @@ def __call__(self, text): prons.extend(pron) return prons + + +class IPAG2p(BaseG2p): + def __init__( + self, + phoneme_dict=None, + word_tokenize_func=english_word_tokenize, + apply_to_oov_word=None, + ignore_ambiguous_words=True, + heteronyms=None, + encoding='latin-1', + ): + """IPA G2P module. This module converts words from grapheme to phoneme representation using phoneme_dict in CMU dict format. + Optionally, it can ignore words which are heteronyms, ambiguous or marked as unchangeable by word_tokenize_func (see code for details). + Ignored words are left unchanged or passed through apply_to_oov_word. + Args: + phoneme_dict (str, Path, Dict): Path to file in CMU dict format or dictionary in CMU dict. + word_tokenize_func: Function for tokenizing text to words. 
+ It has to return List[Tuple[Union[str, List[str]], bool]] where every tuple denotes word representation and flag whether to leave unchanged or not. + It is expected that unchangeable word representation will be represented as List[str], other cases are represented as str. + It is useful to mark word as unchangeable which is already in phoneme representation. + apply_to_oov_word: Function that will be applied to out of phoneme_dict word. + ignore_ambiguous_words: Whether to not handle word via phoneme_dict with ambiguous phoneme sequences. Defaults to True. + heteronyms (str, Path, List): Path to file with heteronyms (every line is new word) or list of words. + encoding: Encoding type. + """ + phoneme_dict = ( + + ) + + if apply_to_oov_word is None: + logging.warning( + "apply_to_oov_word=None, it means that some of words will remain unchanged " + "if they are not handled by one of rule in self.parse_one_word(). " + "It is useful when you use tokenizer with set of phonemes and chars together, otherwise it can be not." + ) + + super().__init__( + phoneme_dict=phoneme_dict, word_tokenize_func=word_tokenize_func, apply_to_oov_word=apply_to_oov_word, + ) + + self.ignore_ambiguous_words = ignore_ambiguous_words + self.heteronyms = ( + set(self._parse_file_by_lines(heteronyms, encoding)) + if isinstance(heteronyms, str) or isinstance(heteronyms, pathlib.Path) + else heteronyms + ) + + @staticmethod + def _parse_as_cmu_dict(phoneme_dict_path=None, encoding='latin-1'): + if phoneme_dict_path is None: + # this part of code downloads file, but it is not rank zero guarded + # Try to check if torch distributed is available, if not get global rank zero to download corpora and make + # all other ranks sleep for a minute + if torch.distributed.is_available() and torch.distributed.is_initialized(): + group = torch.distributed.group.WORLD + if is_global_rank_zero(): + try: + nltk.data.find('corpora/cmudict.zip') + except LookupError: + nltk.download('cmudict', quiet=True) + torch.distributed.barrier(group=group) + elif is_global_rank_zero(): + logging.error( + f"Torch distributed needs to be initialized before you initialized EnglishG2p. This class is prone to " + "data access race conditions. Now downloading corpora from global rank 0. If other ranks pass this " + "before rank 0, errors might result." + ) + try: + nltk.data.find('corpora/cmudict.zip') + except LookupError: + nltk.download('cmudict', quiet=True) + else: + logging.error( + f"Torch distributed needs to be initialized before you initialized EnglishG2p. This class is prone to " + "data access race conditions. This process is not rank 0, and now going to sleep for 1 min. If this " + "rank wakes from sleep prior to rank 0 finishing downloading, errors might result." + ) + time.sleep(60) + + logging.warning( + f"English g2p_dict will be used from nltk.corpus.cmudict.dict(), because phoneme_dict_path=None. " + "Note that nltk.corpus.cmudict.dict() has old version (0.6) of CMUDict. " + "You can use the latest official version of CMUDict (0.7b) with additional changes from NVIDIA directly from NeMo " + "using the path scripts/tts_dataset_files/cmudict-0.7b_nv22.01." 
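# For reference, the parser below consumes CMU-dict formatted lines such as
# the following (illustrative entries; an alternative pronunciation carries a
# "(1)" suffix, which _alt_re strips so both map onto the same lowercased key):
#
#     HELLO  HH AH0 L OW1
#     READ  R IY1 D
#     READ(1)  R EH1 D
#
# so g2p_dict["read"] ends up holding both pronunciations as lists of ARPABET symbols.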
+ ) + + return nltk.corpus.cmudict.dict() + + _alt_re = re.compile(r'\([0-9]+\)') + g2p_dict = {} + with open(phoneme_dict_path, encoding=encoding) as file: + for line in file: + if len(line) and ('A' <= line[0] <= 'Z' or line[0] == "'"): + parts = line.split(' ') + word = re.sub(_alt_re, '', parts[0]) + word = word.lower() + + pronunciation = parts[1].strip().split(" ") + if word in g2p_dict: + g2p_dict[word].append(pronunciation) + else: + g2p_dict[word] = [pronunciation] + return g2p_dict + + @staticmethod + def _parse_file_by_lines(p, encoding): + with open(p, encoding=encoding) as f: + return [l.rstrip() for l in f.readlines()] + + def is_unique_in_phoneme_dict(self, word): + return len(self.phoneme_dict[word]) == 1 + + def parse_one_word(self, word: str): + """ + Returns parsed `word` and `status` as bool. + `status` will be `False` if word wasn't handled, `True` otherwise. + """ + + # punctuation + if re.search("[a-zA-Z]", word) is None: + return list(word), True + + # heteronym + if self.heteronyms is not None and word in self.heteronyms: + return word, True + + # `'s` suffix + if ( + len(word) > 2 + and word.endswith("'s") + and (word not in self.phoneme_dict) + and (word[:-2] in self.phoneme_dict) + and (not self.ignore_ambiguous_words or self.is_unique_in_phoneme_dict(word[:-2])) + ): + return self.phoneme_dict[word[:-2]][0] + ["Z"], True + + # `s` suffix + if ( + len(word) > 1 + and word.endswith("s") + and (word not in self.phoneme_dict) + and (word[:-1] in self.phoneme_dict) + and (not self.ignore_ambiguous_words or self.is_unique_in_phoneme_dict(word[:-1])) + ): + return self.phoneme_dict[word[:-1]][0] + ["Z"], True + + # phoneme dict + if word in self.phoneme_dict and (not self.ignore_ambiguous_words or self.is_unique_in_phoneme_dict(word)): + return self.phoneme_dict[word][0], True + + if self.apply_to_oov_word is not None: + return self.apply_to_oov_word(word), False + else: + return word, False + + def __call__(self, text): + words = self.word_tokenize_func(text) + + prons = [] + for word, without_changes in words: + if without_changes: + prons.extend(word) + continue + + word_by_hyphen = word.split("-") + + pron, is_handled = self.parse_one_word(word) + + if not is_handled and len(word_by_hyphen) > 1: + pron = [] + for sub_word in word_by_hyphen: + p, _ = self.parse_one_word(sub_word) + pron.extend(p) + pron.extend(["-"]) + pron.pop() + + prons.extend(pron) + + return prons diff --git a/nemo/collections/tts/torch/tts_tokenizers.py b/nemo/collections/tts/torch/tts_tokenizers.py index 3d30dbfc6e77..9e1209e1fac8 100644 --- a/nemo/collections/tts/torch/tts_tokenizers.py +++ b/nemo/collections/tts/torch/tts_tokenizers.py @@ -16,11 +16,14 @@ import itertools import string from typing import List +from phonemizer import phonemize +import re from nemo.collections.tts.torch.de_utils import german_text_preprocessing from nemo.collections.tts.torch.en_utils import english_text_preprocessing from nemo.utils import logging +_whitespace_re = re.compile(r'\s+') class BaseTokenizer(abc.ABC): PAD, BLANK, OOV = '', '', '' @@ -351,3 +354,114 @@ def encode(self, text): ps = [space] + ps + [space] return [self._token2id[p] for p in ps] + + +class IPAPhonemesTokenizer(BaseTokenizer): + # fmt: off + + _punctuation = ';:,.!?¡¿—…"«»“”#()-~[]|/' + _letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' + _letters_ipa = 
"ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻàãäåèéíîôõúûüăēĕĝğĩĭŏŝšũŭžǐǝǧʻˀ˥˦˧˨˩̝̞̠̥̪̃̆̊̍̚εابرسشصفلمهوᵐᵑᵝṣẽ​‍‎’⁠ⁿっゎッヮヶ�" + PAD = '_' + # fmt: on + + PUNCT_LIST = [p for p in _punctuation] + + def __init__( + self, + g2p, + punct=True, + non_default_punct_list=None, + stresses=False, + chars=False, + *, + space=' ', + silence=None, + apostrophe=True, + oov=BaseTokenizer.OOV, + sep='|', # To be able to distinguish between 2/3 letters codes. + add_blank_at=None, + pad_with_space=False, + text_preprocessing_func=lambda text: english_text_preprocessing(text, lower=False), + ): + """English phoneme-based tokenizer. + Args: + g2p: Grapheme to phoneme module. + punct: Whether to reserve grapheme for basic punctuation or not. + non_default_punct_list: List of punctuation marks which will be used instead default. + stresses: Whether to use phonemes codes with stresses (0-2) or not. + chars: Whether to additionally use chars together with phonemes. It is useful if g2p module can return chars too. + space: Space token as string. + silence: Silence token as string (will be disabled if it is None). + apostrophe: Whether to use apostrophe or not. + oov: OOV token as string. + sep: Separation token as string. + add_blank_at: Add blank to labels in the specified order ("last") or after tokens (any non None), + if None then no blank in labels. + pad_with_space: Whether to pad text with spaces at the beginning and at the end or not. + text_preprocessing_func: Text preprocessing function for correct execution of the tokenizer. + Basically, it replaces all non-unicode characters with unicode ones. + Note that lower() function shouldn't applied here, because text can contains phonemes (it will be handled by g2p). + """ + + tokens = [] + self.space, tokens = len(tokens), tokens + [space] # Space + + if silence is not None: + self.silence, tokens = len(tokens), tokens + [silence] # Silence + + tokens.extend([l for l in self._letters_ipa]) + tokens.extend([l for l in self._letters]) + + if punct: + tokens.extend(self.PUNCT_LIST) + + super().__init__(tokens, oov=oov, pad=self.PAD, sep=sep, add_blank_at=add_blank_at) + + self.chars = chars + self.punct = punct + self.stresses = stresses + self.pad_with_space = pad_with_space + + self.text_preprocessing_func = text_preprocessing_func + self.g2p = g2p + + def encode(self, text): + """See base class.""" + ps, space, tokens = [], self.tokens[self.space], set(self.tokens) + + text = self.text_preprocessing_func(text) + if self.g2p is None: + g2p_text = phonemize(text, language='en-us', backend='espeak', strip=True, preserve_punctuation=True, with_stress=self.stresses) + g2p_text = re.sub(_whitespace_re, ' ', g2p_text) + else: + g2p_text = self.g2p(text) + + for p in g2p_text: # noqa + # Remove stress + if p.isalnum() and len(p) == 3 and not self.stresses: + p = p[:2] + + # Add space if last one isn't one + if p == space and len(ps) > 0 and ps[-1] != space: + ps.append(p) + # Add next phoneme or char (if chars=True) + elif (p.isalnum() or p == "'") and p in tokens: + ps.append(p) + # Add punct + elif (p in self.PUNCT_LIST) and self.punct: + ps.append(p) + # Warn about unknown char/phoneme + elif p != space: + logging.warning( + f"Text: [{''.join(g2p_text)}] contains unknown char/phoneme: [{p}]. Original text: [{text}]. Symbol will be skipped." 
+ ) + + # Remove trailing spaces + while ps[-1] == space: + ps.pop() + + if self.pad_with_space: + ps = [space] + ps + [space] + + return [self._token2id[p] for p in ps] \ No newline at end of file From 9ae3f9d7977e6382302c7663d448dcca1d167efc Mon Sep 17 00:00:00 2001 From: treacker Date: Tue, 22 Feb 2022 07:47:09 -0800 Subject: [PATCH 077/244] refactored IPA g2p --- examples/tts/conf/vits.yaml | 23 ++- nemo/collections/tts/torch/en_utils.py | 30 ---- nemo/collections/tts/torch/g2ps.py | 153 ++----------------- nemo/collections/tts/torch/tts_tokenizers.py | 37 +---- 4 files changed, 31 insertions(+), 212 deletions(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index a865bfaab40c..55d0f043961e 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -17,8 +17,6 @@ phoneme_dict_path: "scripts/tts_dataset_files/cmudict-0.7b_nv22.01" heteronyms_path: "scripts/tts_dataset_files/heteronyms-030921" whitelist_path: "nemo_text_processing/text_normalization/en/data/whitelist_lj_speech.tsv" -filter_channels: 768 -filter_length: 1024 model: pitch_fmin: 65.40639132514966 @@ -51,8 +49,8 @@ model: c_kl: 1. inter_channels: 192 hidden_channels: 192 - filter_channels: ${filter_channels} - filter_length: ${filter_length} + filter_channels: 768 + filter_length: 1024 n_heads: 2 p_dropout: 0.1 n_layers_q: 3 @@ -79,8 +77,9 @@ model: chars: true apostrophe: true pad_with_space: true - g2p: null - # _target_: nemo.collections.tts.torch.g2ps.EnglishG2p + g2p: + _target_: nemo.collections.tts.torch.g2ps.IPAG2p + strip: true # phoneme_dict: ${phoneme_dict_path} # heteronyms: ${heteronyms_path} # text_tokenizer: @@ -119,7 +118,7 @@ model: pin_memory: false batch_sampler: - batch_size: 128 + batch_size: 32 boundaries: [32,300,400,500,600,700,800,900,1000] num_replicas: ${trainer.devices} shuffle: true @@ -199,21 +198,21 @@ trainer: # gradient_clip_val: 1000.0 checkpoint_callback: false # Provided by exp_manager logger: false # Provided by exp_manager - log_every_n_steps: 50 + log_every_n_steps: 200 flush_logs_every_n_steps: 1000 check_val_every_n_epoch: 2 exp_manager: - exp_dir: vits_char + exp_dir: vits_ipa_ngc name: ${name} - create_tensorboard_logger: true + create_tensorboard_logger: false create_checkpoint_callback: true checkpoint_callback_params: monitor: loss_gen_all mode: min - create_wandb_logger: false + create_wandb_logger: true wandb_logger_kwargs: - name: VITS_char + name: VITS_ipa_ngc project: ${name} entity: treacker15 resume_if_exists: false diff --git a/nemo/collections/tts/torch/en_utils.py b/nemo/collections/tts/torch/en_utils.py index e026baaa2874..2d9732f39ec3 100644 --- a/nemo/collections/tts/torch/en_utils.py +++ b/nemo/collections/tts/torch/en_utils.py @@ -24,28 +24,6 @@ } SYNOGLYPH2ASCII = {g: asc for asc, glyphs in _synoglyphs.items() for g in glyphs} -# List of (regular expression, replacement) pairs for abbreviations: -_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [ - ('mrs', 'misess'), - ('mr', 'mister'), - ('dr', 'doctor'), - ('st', 'saint'), - ('co', 'company'), - ('jr', 'junior'), - ('maj', 'major'), - ('gen', 'general'), - ('drs', 'doctors'), - ('rev', 'reverend'), - ('lt', 'lieutenant'), - ('hon', 'honorable'), - ('sgt', 'sergeant'), - ('capt', 'captain'), - ('esq', 'esquire'), - ('ltd', 'limited'), - ('col', 'colonel'), - ('ft', 'fort'), -]] - # Example of parsing by groups via _WORDS_RE. # Groups: @@ -56,12 +34,7 @@ # config file must contain |EY1 EY1|, B, C, D, E, F, and G. 
# 111111311113111131111111322222222233133133133133133111313 _WORDS_RE = re.compile("([a-zA-Z]+(?:[a-zA-Z-']*[a-zA-Z]+)*)|(\|[^|]*\|)|([^a-zA-Z|]+)") -_whitespace_re = re.compile(r'\s+') -def expand_abbreviations(text): - for regex, replacement in _abbreviations: - text = re.sub(regex, replacement, text) - return text def english_text_preprocessing(text, lower=True, abbreviations=True): text = unicode(text) @@ -71,9 +44,6 @@ def english_text_preprocessing(text, lower=True, abbreviations=True): if lower: text = text.lower() - if abbreviations: - text = expand_abbreviations(text) - return text diff --git a/nemo/collections/tts/torch/g2ps.py b/nemo/collections/tts/torch/g2ps.py index 594c66d99186..d3f43586481b 100644 --- a/nemo/collections/tts/torch/g2ps.py +++ b/nemo/collections/tts/torch/g2ps.py @@ -19,14 +19,17 @@ import nltk import torch +import phonemizer +from phonemizer import phonemize from nemo.collections.tts.torch.en_utils import english_word_tokenize from nemo.utils import logging from nemo.utils.get_rank import is_global_rank_zero +global_phonemizer = phonemizer.backend.EspeakBackend(language='en-us', preserve_punctuation=True, with_stress=True) _alt_re = re.compile(r'\([0-9]+\)') - +_whitespace_re = re.compile(r'\s+') class BaseG2p(abc.ABC): def __init__( @@ -232,12 +235,8 @@ def __call__(self, text): class IPAG2p(BaseG2p): def __init__( self, - phoneme_dict=None, - word_tokenize_func=english_word_tokenize, - apply_to_oov_word=None, - ignore_ambiguous_words=True, - heteronyms=None, - encoding='latin-1', + strip=True, + njobs=1 ): """IPA G2P module. This module converts words from grapheme to phoneme representation using phoneme_dict in CMU dict format. Optionally, it can ignore words which are heteronyms, ambiguous or marked as unchangeable by word_tokenize_func (see code for details). @@ -253,92 +252,15 @@ def __init__( heteronyms (str, Path, List): Path to file with heteronyms (every line is new word) or list of words. encoding: Encoding type. """ - phoneme_dict = ( - - ) - - if apply_to_oov_word is None: - logging.warning( - "apply_to_oov_word=None, it means that some of words will remain unchanged " - "if they are not handled by one of rule in self.parse_one_word(). " - "It is useful when you use tokenizer with set of phonemes and chars together, otherwise it can be not." - ) - - super().__init__( - phoneme_dict=phoneme_dict, word_tokenize_func=word_tokenize_func, apply_to_oov_word=apply_to_oov_word, - ) - - self.ignore_ambiguous_words = ignore_ambiguous_words - self.heteronyms = ( - set(self._parse_file_by_lines(heteronyms, encoding)) - if isinstance(heteronyms, str) or isinstance(heteronyms, pathlib.Path) - else heteronyms - ) - - @staticmethod - def _parse_as_cmu_dict(phoneme_dict_path=None, encoding='latin-1'): - if phoneme_dict_path is None: - # this part of code downloads file, but it is not rank zero guarded - # Try to check if torch distributed is available, if not get global rank zero to download corpora and make - # all other ranks sleep for a minute - if torch.distributed.is_available() and torch.distributed.is_initialized(): - group = torch.distributed.group.WORLD - if is_global_rank_zero(): - try: - nltk.data.find('corpora/cmudict.zip') - except LookupError: - nltk.download('cmudict', quiet=True) - torch.distributed.barrier(group=group) - elif is_global_rank_zero(): - logging.error( - f"Torch distributed needs to be initialized before you initialized EnglishG2p. This class is prone to " - "data access race conditions. Now downloading corpora from global rank 0. 
If other ranks pass this " - "before rank 0, errors might result." - ) - try: - nltk.data.find('corpora/cmudict.zip') - except LookupError: - nltk.download('cmudict', quiet=True) - else: - logging.error( - f"Torch distributed needs to be initialized before you initialized EnglishG2p. This class is prone to " - "data access race conditions. This process is not rank 0, and now going to sleep for 1 min. If this " - "rank wakes from sleep prior to rank 0 finishing downloading, errors might result." - ) - time.sleep(60) - - logging.warning( - f"English g2p_dict will be used from nltk.corpus.cmudict.dict(), because phoneme_dict_path=None. " - "Note that nltk.corpus.cmudict.dict() has old version (0.6) of CMUDict. " - "You can use the latest official version of CMUDict (0.7b) with additional changes from NVIDIA directly from NeMo " - "using the path scripts/tts_dataset_files/cmudict-0.7b_nv22.01." - ) - - return nltk.corpus.cmudict.dict() - - _alt_re = re.compile(r'\([0-9]+\)') - g2p_dict = {} - with open(phoneme_dict_path, encoding=encoding) as file: - for line in file: - if len(line) and ('A' <= line[0] <= 'Z' or line[0] == "'"): - parts = line.split(' ') - word = re.sub(_alt_re, '', parts[0]) - word = word.lower() + self.strip = strip + self.njobs = njobs - pronunciation = parts[1].strip().split(" ") - if word in g2p_dict: - g2p_dict[word].append(pronunciation) - else: - g2p_dict[word] = [pronunciation] - return g2p_dict @staticmethod def _parse_file_by_lines(p, encoding): with open(p, encoding=encoding) as f: return [l.rstrip() for l in f.readlines()] - def is_unique_in_phoneme_dict(self, word): - return len(self.phoneme_dict[word]) == 1 def parse_one_word(self, word: str): """ @@ -350,60 +272,13 @@ def parse_one_word(self, word: str): if re.search("[a-zA-Z]", word) is None: return list(word), True - # heteronym - if self.heteronyms is not None and word in self.heteronyms: - return word, True + word = global_phonemizer.phonemize([word], strip=self.strip, njobs=self.njobs) + word = re.sub(_whitespace_re, ' ', word[0]) - # `'s` suffix - if ( - len(word) > 2 - and word.endswith("'s") - and (word not in self.phoneme_dict) - and (word[:-2] in self.phoneme_dict) - and (not self.ignore_ambiguous_words or self.is_unique_in_phoneme_dict(word[:-2])) - ): - return self.phoneme_dict[word[:-2]][0] + ["Z"], True - - # `s` suffix - if ( - len(word) > 1 - and word.endswith("s") - and (word not in self.phoneme_dict) - and (word[:-1] in self.phoneme_dict) - and (not self.ignore_ambiguous_words or self.is_unique_in_phoneme_dict(word[:-1])) - ): - return self.phoneme_dict[word[:-1]][0] + ["Z"], True - - # phoneme dict - if word in self.phoneme_dict and (not self.ignore_ambiguous_words or self.is_unique_in_phoneme_dict(word)): - return self.phoneme_dict[word][0], True - - if self.apply_to_oov_word is not None: - return self.apply_to_oov_word(word), False - else: - return word, False + return word, True def __call__(self, text): - words = self.word_tokenize_func(text) - - prons = [] - for word, without_changes in words: - if without_changes: - prons.extend(word) - continue + g2p_text = global_phonemizer.phonemize([text], strip=self.strip, njobs=self.njobs) + g2p_text = re.sub(_whitespace_re, ' ', g2p_text[0]) - word_by_hyphen = word.split("-") - - pron, is_handled = self.parse_one_word(word) - - if not is_handled and len(word_by_hyphen) > 1: - pron = [] - for sub_word in word_by_hyphen: - p, _ = self.parse_one_word(sub_word) - pron.extend(p) - pron.extend(["-"]) - pron.pop() - - prons.extend(pron) - - return prons 
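# A minimal usage sketch of the refactored class (assumes espeak-ng and the
# phonemizer package are available; output varies across espeak versions):
#
#     g2p = IPAG2p(strip=True)
#     g2p("Hello world!")  # -> an IPA string such as "həloʊ wˈɜːld!"
#
# Unlike the dictionary-backed EnglishG2p, every word is routed through espeak,
# so no separate out-of-vocabulary handling is needed here.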
+ return g2p_text diff --git a/nemo/collections/tts/torch/tts_tokenizers.py b/nemo/collections/tts/torch/tts_tokenizers.py index 9e1209e1fac8..337dc882e2cb 100644 --- a/nemo/collections/tts/torch/tts_tokenizers.py +++ b/nemo/collections/tts/torch/tts_tokenizers.py @@ -16,14 +16,13 @@ import itertools import string from typing import List -from phonemizer import phonemize + import re from nemo.collections.tts.torch.de_utils import german_text_preprocessing from nemo.collections.tts.torch.en_utils import english_text_preprocessing from nemo.utils import logging -_whitespace_re = re.compile(r'\s+') class BaseTokenizer(abc.ABC): PAD, BLANK, OOV = '', '', '' @@ -428,40 +427,16 @@ def __init__( def encode(self, text): """See base class.""" - ps, space, tokens = [], self.tokens[self.space], set(self.tokens) text = self.text_preprocessing_func(text) - if self.g2p is None: - g2p_text = phonemize(text, language='en-us', backend='espeak', strip=True, preserve_punctuation=True, with_stress=self.stresses) - g2p_text = re.sub(_whitespace_re, ' ', g2p_text) - else: - g2p_text = self.g2p(text) - for p in g2p_text: # noqa - # Remove stress - if p.isalnum() and len(p) == 3 and not self.stresses: - p = p[:2] - - # Add space if last one isn't one - if p == space and len(ps) > 0 and ps[-1] != space: - ps.append(p) - # Add next phoneme or char (if chars=True) - elif (p.isalnum() or p == "'") and p in tokens: - ps.append(p) - # Add punct - elif (p in self.PUNCT_LIST) and self.punct: - ps.append(p) - # Warn about unknown char/phoneme - elif p != space: - logging.warning( - f"Text: [{''.join(g2p_text)}] contains unknown char/phoneme: [{p}]. Original text: [{text}]. Symbol will be skipped." - ) + g2p_text = self.g2p(text) # Remove trailing spaces - while ps[-1] == space: - ps.pop() + # while ps[-1] == space: + # ps.pop() if self.pad_with_space: - ps = [space] + ps + [space] + g2p_text = self.tokens[self.space] + g2p_text + self.tokens[self.space] - return [self._token2id[p] for p in ps] \ No newline at end of file + return [self._token2id[p] for p in g2p_text] \ No newline at end of file From cfd95c0a90da351d1ae6f53b2522518c18921abf Mon Sep 17 00:00:00 2001 From: treacker Date: Thu, 24 Feb 2022 10:32:58 -0800 Subject: [PATCH 078/244] added vits losses --- nemo/collections/tts/losses/vits_losses.py | 65 +++++++++++++++++++++- nemo/collections/tts/models/vits.py | 24 +++++--- 2 files changed, 79 insertions(+), 10 deletions(-) diff --git a/nemo/collections/tts/losses/vits_losses.py b/nemo/collections/tts/losses/vits_losses.py index 388f0ebd0933..3439ba1ff0a0 100644 --- a/nemo/collections/tts/losses/vits_losses.py +++ b/nemo/collections/tts/losses/vits_losses.py @@ -107,4 +107,67 @@ def forward(self, fmap_r, fmap_g): gl = gl.float() loss += torch.mean(torch.abs(rl - gl)) - return loss * 2 \ No newline at end of file + return loss * 2 + +class DiscriminatorLoss(Loss): + """Discriminator Loss module""" + + @property + def input_types(self): + return { + "disc_real_outputs": [NeuralType(('B', 'T'), VoidType())], + "disc_generated_outputs": [NeuralType(('B', 'T'), VoidType())], + } + + @property + def output_types(self): + return { + "loss": NeuralType(elements_type=LossType()), + "real_losses": [NeuralType(elements_type=LossType())], + "fake_losses": [NeuralType(elements_type=LossType())], + } + + @typecheck() + def forward(self, disc_real_outputs, disc_generated_outputs): + loss = 0 + r_losses = [] + g_losses = [] + for dr, dg in zip(disc_real_outputs, disc_generated_outputs): + dr = dr.float() + dg = dg.float() + r_loss 
= torch.mean((1 - dr) ** 2) + g_loss = torch.mean(dg ** 2) + loss += r_loss + g_loss + r_losses.append(r_loss.item()) + g_losses.append(g_loss.item()) + + return loss, r_losses, g_losses + + +class GeneratorLoss(Loss): + """Generator Loss module""" + + @property + def input_types(self): + return { + "disc_outputs": [NeuralType(('B', 'T'), VoidType())], + } + + @property + def output_types(self): + return { + "loss": NeuralType(elements_type=LossType()), + "fake_losses": [NeuralType(elements_type=LossType())], + } + + @typecheck() + def forward(self, disc_outputs): + loss = 0 + gen_losses = [] + for dg in disc_outputs: + dg = dg.float() + l = torch.mean((1 - dg) ** 2) + gen_losses.append(l) + loss += l + + return loss, gen_losses \ No newline at end of file diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 7e0bf8053b53..2af56b991b28 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -27,8 +27,12 @@ from torch.nn import functional as F from nemo.collections.tts.helpers.helpers import plot_spectrogram_to_numpy, DistributedBucketSampler -from nemo.collections.tts.losses.hifigan_losses import DiscriminatorLoss, GeneratorLoss -from nemo.collections.tts.losses.vits_losses import KlLoss, FeatureMatchingLoss +from nemo.collections.tts.losses.vits_losses import ( + KlLoss, + FeatureMatchingLoss, + DiscriminatorLoss, + GeneratorLoss +) from nemo.collections.tts.models.base import TextToWaveform from nemo.collections.tts.modules.vits_modules import ( MultiPeriodDiscriminator, @@ -193,6 +197,9 @@ def get_spec(self, audio): return spec def training_step(self, batch, batch_idx): + # get optimizers + optim_g, optim_d = self.optimizers() + # TODO: support accum gradient or don't allow to use accum gradient in init (y, y_lengths, x, x_lengths) = batch @@ -206,7 +213,7 @@ def training_step(self, batch, batch_idx): mel = spec_to_mel_torch( spec, - self._cfg.filter_length, + self._cfg.n_window_size, self._cfg.n_mel_channels, self._cfg.sample_rate, self._cfg.mel_fmin, @@ -217,7 +224,7 @@ def training_step(self, batch, batch_idx): y_hat = y_hat.float() y_hat_mel = audio_to_mel_torch( y_hat.squeeze(1), - self._cfg.filter_length, + self._cfg.n_window_size, self._cfg.n_mel_channels, self._cfg.sample_rate, self.cfg.n_window_stride, @@ -235,16 +242,15 @@ def training_step(self, batch, batch_idx): disc_generated_outputs=y_d_hat_g) loss_disc_all = loss_disc - # get optimizers - optim_g, optim_d = self.optimizers() + # train discriminator optim_d.zero_grad() self.manual_backward(loss_disc_all) - optim_d.step() # TODO: maybe change it to PTL-based function norm_d = clip_grad_value_(self.net_d.parameters(), None) - + optim_d.step() + with autocast(enabled=True): # Generator y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = self.net_d(y, y_hat) @@ -259,9 +265,9 @@ def training_step(self, batch, batch_idx): # train generator optim_g.zero_grad() self.manual_backward(loss_gen_all) - optim_g.step() # TODO: maybe change it to PTL-based function norm_g = clip_grad_value_(self.net_g.parameters(), None) + optim_g.step() schedulers = self.lr_schedulers() if schedulers is not None: From df4a86611340e3f36bf184f9d304fee665543cb2 Mon Sep 17 00:00:00 2001 From: treacker Date: Tue, 15 Mar 2022 04:25:11 -0700 Subject: [PATCH 079/244] some ref --- examples/tts/conf/vits.yaml | 17 ++++++++++------- nemo/collections/tts/models/vits.py | 6 ++---- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml 
index 55d0f043961e..7683d1c7a0be 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -50,7 +50,6 @@ model: inter_channels: 192 hidden_channels: 192 filter_channels: 768 - filter_length: 1024 n_heads: 2 p_dropout: 0.1 n_layers_q: 3 @@ -80,13 +79,17 @@ model: g2p: _target_: nemo.collections.tts.torch.g2ps.IPAG2p strip: true - # phoneme_dict: ${phoneme_dict_path} - # heteronyms: ${heteronyms_path} # text_tokenizer: - # _target_: nemo.collections.tts.torch.tts_tokenizers.EnglishCharsTokenizer + # _target_: nemo.collections.tts.torch.tts_tokenizers.EnglishPhonemesTokenizer # punct: true + # stresses: true + # chars: true # apostrophe: true # pad_with_space: true + # g2p: + # _target_: nemo.collections.tts.torch.g2ps.EnglishG2p + # phoneme_dict: ${phoneme_dict_path} + # heteronyms: ${heteronyms_path} train_ds: dataset: @@ -200,10 +203,10 @@ trainer: logger: false # Provided by exp_manager log_every_n_steps: 200 flush_logs_every_n_steps: 1000 - check_val_every_n_epoch: 2 + check_val_every_n_epoch: 5 exp_manager: - exp_dir: vits_ipa_ngc + exp_dir: vits_ipa_fix name: ${name} create_tensorboard_logger: false create_checkpoint_callback: true @@ -212,7 +215,7 @@ exp_manager: mode: min create_wandb_logger: true wandb_logger_kwargs: - name: VITS_ipa_ngc + name: vits_ipa_fix project: ${name} entity: treacker15 resume_if_exists: false diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 2af56b991b28..7e7e31796b4d 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -174,9 +174,8 @@ def configure_optimizers(self): scheduler_d_dict = {'scheduler': scheduler_d, 'interval': 'step'} return [optim_g, optim_d], [scheduler_g_dict, scheduler_d_dict] + # only for inference def forward(self, batch, batch_idx): - # TODO: Check if this is correct forward - # only for inference with torch.no_grad(): (x, x_lengths, spec, spec_lengths, y, y_lengths) = batch @@ -243,7 +242,6 @@ def training_step(self, batch, batch_idx): loss_disc_all = loss_disc - # train discriminator optim_d.zero_grad() self.manual_backward(loss_disc_all) @@ -270,7 +268,7 @@ def training_step(self, batch, batch_idx): optim_g.step() schedulers = self.lr_schedulers() - if schedulers is not None: + if schedulers is not None and self.trainer.is_last_batch: sch1, sch2 = schedulers sch1.step() sch2.step() From dcd2596ee428d9eb6e08b947be5f01ab7242bb75 Mon Sep 17 00:00:00 2001 From: treacker Date: Mon, 28 Mar 2022 04:06:50 -0700 Subject: [PATCH 080/244] fix --- examples/tts/conf/vits.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index 7683d1c7a0be..44537c6cfc91 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -206,7 +206,7 @@ trainer: check_val_every_n_epoch: 5 exp_manager: - exp_dir: vits_ipa_fix + exp_dir: vits_full name: ${name} create_tensorboard_logger: false create_checkpoint_callback: true @@ -215,8 +215,8 @@ exp_manager: mode: min create_wandb_logger: true wandb_logger_kwargs: - name: vits_ipa_fix + name: vits_full project: ${name} - entity: treacker15 + entity: nvidia resume_if_exists: false resume_ignore_no_checkpoint: false From 783a9a941bfeea450ad969ed723203c71649181e Mon Sep 17 00:00:00 2001 From: treacker Date: Tue, 5 Apr 2022 05:54:54 -0700 Subject: [PATCH 081/244] added checkpointing --- examples/tts/conf/vits.yaml | 2 +- examples/tts/vits.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git 
a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml
index 44537c6cfc91..22b283b7ab9a 100644
--- a/examples/tts/conf/vits.yaml
+++ b/examples/tts/conf/vits.yaml
@@ -12,7 +12,7 @@ validation_datasets: "../datasets/ljspeech_ds/LJSpeech-1.1/val_manifest.json"
 sup_data_path: null
 sup_data_types: null
 
-
+checkpoint_path: 'vits_full/VITS/2022-03-25_17-41-47/checkpoints/VITS--loss_gen_all\=37.8252-epoch\=7719-last.ckpt'
 phoneme_dict_path: "scripts/tts_dataset_files/cmudict-0.7b_nv22.01"
 heteronyms_path: "scripts/tts_dataset_files/heteronyms-030921"
 whitelist_path: "nemo_text_processing/text_normalization/en/data/whitelist_lj_speech.tsv"
diff --git a/examples/tts/vits.py b/examples/tts/vits.py
index 88996500786d..2ae1cde63648 100644
--- a/examples/tts/vits.py
+++ b/examples/tts/vits.py
@@ -33,6 +33,8 @@ def main(cfg):
     # trainer = pl.Trainer(plugins=plugins, **cfg.trainer)
     exp_manager(trainer, cfg.get("exp_manager", None))
     model = VitsModel(cfg=cfg.model, trainer=trainer)
+    if cfg.checkpoint_path is not None:
+        model = VitsModel.load_from_checkpoint(cfg.checkpoint_path)
     trainer.callbacks.extend([pl.callbacks.LearningRateMonitor(), LogEpochTimeCallback()])
     trainer.fit(model)

From 2bc0ac13f25f5c323942cce872ec29b6a3484db4 Mon Sep 17 00:00:00 2001
From: treacker
Date: Wed, 6 Apr 2022 13:20:02 -0700
Subject: [PATCH 082/244] cp

---
 examples/tts/conf/vits.yaml | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml
index 22b283b7ab9a..c4d5d47f2e86 100644
--- a/examples/tts/conf/vits.yaml
+++ b/examples/tts/conf/vits.yaml
@@ -13,7 +13,9 @@ sup_data_path: null
 sup_data_types: null
 
 checkpoint_path: 'vits_full/VITS/2022-03-25_17-41-47/checkpoints/VITS--loss_gen_all\=37.8252-epoch\=7719-last.ckpt'
-phoneme_dict_path: "scripts/tts_dataset_files/cmudict-0.7b_nv22.01"
+# checkpoint_path: 'checkpoints/vits-epoch=7719.ckpt'
+
+phoneme_dict_path: "scripts/tts_dataset_files/cmudict-0.7b_nv22.01"
 heteronyms_path: "scripts/tts_dataset_files/heteronyms-030921"
 whitelist_path: "nemo_text_processing/text_normalization/en/data/whitelist_lj_speech.tsv"
@@ -192,7 +194,7 @@
 trainer:
   num_nodes: 1
-  devices: 1
+  devices: 2
   accelerator: gpu
   strategy: ddp
   precision: 16
@@ -206,7 +208,7 @@ check_val_every_n_epoch: 5
 
 exp_manager:
-  exp_dir: vits_full
+  exp_dir: ../exps/vits_full20
   name: ${name}
-  create_tensorboard_logger: false
+  create_tensorboard_logger: true
   create_checkpoint_callback: true
   checkpoint_callback_params:
     monitor: loss_gen_all
     mode: min
-  create_wandb_logger: true
+  create_wandb_logger: false
   wandb_logger_kwargs:
-    name: vits_full
+    name: vits_full20
     project: ${name}
     entity: nvidia
   resume_if_exists: false
   resume_ignore_no_checkpoint: false

From 4b491bbf1e49632a874966864f20bc6ba7befcdb Mon Sep 17 00:00:00 2001
From: treacker
Date: Thu, 7 Apr 2022 00:58:05 -0700
Subject: [PATCH 083/244] cfg

---
 examples/tts/conf/vits.yaml | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml
index c4d5d47f2e86..447bc26e53f6 100644
--- a/examples/tts/conf/vits.yaml
+++ b/examples/tts/conf/vits.yaml
@@ -208,16 +208,16 @@ check_val_every_n_epoch: 5
 
 exp_manager:
-  exp_dir: ../exps/vits_full20
+  exp_dir: ../exps/vits_full2
   name: ${name}
-  create_tensorboard_logger: true
+  create_tensorboard_logger: false
   create_checkpoint_callback: true
   checkpoint_callback_params:
     monitor: loss_gen_all
     mode: min
-  create_wandb_logger: false
+  create_wandb_logger: true
   wandb_logger_kwargs:
-    name: vits_full20
+    name: vits_full2
     project: ${name}
     entity: nvidia
   resume_if_exists: false
   resume_ignore_no_checkpoint: false

From 0736b272a6914801ba13f57052c480c7957981de Mon Sep 17 00:00:00 2001
From: treacker
Date: Thu, 7 Apr 2022 01:43:53 -0700
Subject: [PATCH 084/244] merged some 1.8.0 fixes

---
 .../asr/parts/preprocessing/features.py       | 18 +---
 nemo/collections/tts/torch/data.py            | 65 ++++++++++---
 nemo/utils/exp_manager.py                     | 91 ++++++------------
 tutorials/tts/Inference_ModelSelect.ipynb     |  9 +-
 4 files changed, 89 insertions(+), 94 deletions(-)

diff --git a/nemo/collections/asr/parts/preprocessing/features.py b/nemo/collections/asr/parts/preprocessing/features.py
index f9a488f3a8ac..b513833dc443 100644
--- a/nemo/collections/asr/parts/preprocessing/features.py
+++ b/nemo/collections/asr/parts/preprocessing/features.py
@@ -42,21 +42,12 @@
 import torch.nn.functional as F
 from librosa.util import tiny
 from torch.autograd import Variable
+from torch_stft import STFT
 
 from nemo.collections.asr.parts.preprocessing.perturb import AudioAugmentor
 from nemo.collections.asr.parts.preprocessing.segment import AudioSegment
 from nemo.utils import logging
 
-# TODO @blisc: Perhaps refactor instead of import guarding
-try:
-    from torch_stft import STFT
-except ModuleNotFoundError:
-    from nemo.utils.exceptions import CheckInstall
-
-    # fmt: off
-    class STFT(CheckInstall): pass
-    # fmt: on
-
 CONSTANT = 1e-5
 
@@ -178,8 +169,8 @@ def inverse(self, magnitude, phase):
 
         if self.window is not None:
             window_sum = librosa.filters.window_sumsquare(
-                self.window,
-                magnitude.size(-1),
+                window=self.window,
+                n_frames=magnitude.size(-1),
                 hop_length=self.hop_length,
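# Note: recent librosa releases deprecate positional arguments for functions
# such as librosa.filters.mel and librosa.filters.window_sumsquare, hence the
# switch to explicit keyword arguments in this call and in the mel call below.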
file_info["normalized_text"] = item["normalized_text"] - file_info["text_tokens"] = self.text_tokenizer(item["normalized_text"]) + + if self.cache_text: + file_info["text_tokens"] = self.text_tokenizer(file_info["normalized_text"]) data.append(file_info) self.lengths.append(os.path.getsize(item["audio_filepath"]) // (2 * hop_length)) @@ -243,6 +247,7 @@ def __init__( hop_length=self.hop_len, win_length=self.win_length, window=window_fn(self.win_length, periodic=False).to(torch.float) if window_fn else None, + return_complex=True, ) # Initialize sup_data_path, sup_data_types and run preprocessing methods for every supplementary data type @@ -333,6 +338,13 @@ def add_align_prior_matrix(self, **kwargs): self.align_prior_matrix_folder.mkdir(exist_ok=True, parents=True) self.use_beta_binomial_interpolator = kwargs.pop('use_beta_binomial_interpolator', False) + if not self.cache_text: + if 'use_beta_binomial_interpolator' in kwargs and not self.use_beta_binomial_interpolator: + logging.warning( + "phoneme_probability is not None, but use_beta_binomial_interpolator=False, we" + " set use_beta_binomial_interpolator=True manually to use phoneme_probability." + ) + self.use_beta_binomial_interpolator = True if self.use_beta_binomial_interpolator: self.beta_binomial_interpolator = BetaBinomialInterpolator() @@ -389,9 +401,13 @@ def __getitem__(self, index): features = self.featurizer.process(sample["audio_filepath"], trim=self.trim) audio, audio_length = features, torch.tensor(features.shape[0]).long() - # Load text - text = torch.tensor(sample["text_tokens"]).long() - text_length = torch.tensor(len(sample["text_tokens"])).long() + if "text_tokens" in sample: + text = torch.tensor(sample["text_tokens"]).long() + text_length = torch.tensor(len(sample["text_tokens"])).long() + else: + tokenized = self.text_tokenizer(sample["normalized_text"]) + text = torch.tensor(tokenized).long() + text_length = torch.tensor(len(tokenized)).long() # Load mel if needed log_mel, log_mel_length = None, None @@ -423,6 +439,7 @@ def __getitem__(self, index): # Load alignment prior matrix if needed align_prior_matrix = None if AlignPriorMatrix in self.sup_data_types_set: + align_prior_matrix = None if self.use_beta_binomial_interpolator: mel_len = self.get_log_mel(audio).shape[2] align_prior_matrix = torch.from_numpy(self.beta_binomial_interpolator(mel_len, text_length.item())) @@ -727,9 +744,32 @@ def __init__( load_precomputed_mel: bool = False, hop_length: Optional[int] = None, ): - if isinstance(manifest_filepath, str): - manifest_filepath = [manifest_filepath] - self.manifest_filepath = manifest_filepath + """Dataset which can be used for training and fine-tuning vocoder with pre-computed mel-spectrograms. + Args: + manifest_filepath (Union[str, Path, List[str], List[Path]]): Path(s) to the .json manifests containing information on the + dataset. Each line in the .json file should be valid json. Note: the .json file itself is not valid + json. Each line should contain the following: + "audio_filepath": , + "duration": (Optional), + "mel_filepath": (Optional, can be in .npy (numpy.save) or .pt (torch.save) format) + sample_rate (int): The sample rate of the audio. Or the sample rate that we will resample all files to. + n_segments (int): The length of audio in samples to load. For example, given a sample rate of 16kHz, and + n_segments=16000, a random 1 second section of audio from the clip will be loaded. The section will + be randomly sampled everytime the audio is batched. 
diff --git a/nemo/utils/exp_manager.py b/nemo/utils/exp_manager.py
index 7e3c752f6085..7708be007f04 100644
--- a/nemo/utils/exp_manager.py
+++ b/nemo/utils/exp_manager.py
@@ -32,9 +32,7 @@
 from pytorch_lightning.callbacks.timer import Interval, Timer
 from pytorch_lightning.loggers import LoggerCollection as _LoggerCollection
 from pytorch_lightning.loggers import TensorBoardLogger, WandbLogger
-from pytorch_lightning.plugins.training_type.ddp import DDPPlugin
-from pytorch_lightning.trainer.states import RunningStage
-from pytorch_lightning.utilities.distributed import rank_zero_info
+from pytorch_lightning.strategies.ddp import DDPStrategy
 
 from nemo.constants import NEMO_ENV_VARNAME_TESTING, NEMO_ENV_VARNAME_VERSION
 from nemo.utils import logging, timers
@@ -43,6 +41,7 @@
 from nemo.utils.exceptions import NeMoBaseException
 from nemo.utils.get_rank import is_global_rank_zero
 from nemo.utils.lightning_logger_patch import add_filehandlers_to_pl_logger
+from nemo.utils.model_utils import inject_model_parallel_rank, uninject_model_parallel_rank
 
 
 class NotFoundError(NeMoBaseException):
@@ -75,13 +74,13 @@ class CallbackParams:
     save_top_k: Optional[int] = 3
     save_weights_only: Optional[bool] = False
     mode: Optional[str] = "min"
-    every_n_val_epochs: Optional[int] = 1
+    every_n_epochs: Optional[int] = 1
     prefix: Optional[str] = None  # If None, exp_manager will attempt to handle the filepath
     postfix: str = ".nemo"
     save_best_model: bool = False
     always_save_nemo: bool = False
     save_nemo_on_train_end: Optional[bool] = True  # Whether to automatically save .nemo file during on_train_end hook
-    model_parallel_size: Optional[int] = None
+    model_parallel_size: Optional[int] = None  # tensor parallel size * pipeline parallel size
 
 
 @dataclass
@@ -117,7 +116,6 @@ class ExpManagerConfig:
     # logs timing of
train/val/test steps log_step_timing: Optional[bool] = True step_timing_kwargs: Optional[StepTimingParams] = StepTimingParams() - model_parallel_size: Optional[int] = None class TimingCallback(Callback): @@ -139,10 +137,10 @@ def _on_batch_end(self, name, pl_module): self.timer.stop(name) pl_module.log(name, self.timer[name], on_step=True, on_epoch=False) - def on_train_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx): + def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): self._on_batch_start("train_step_timing") - def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx): + def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): self._on_batch_end("train_step_timing", pl_module) def on_validation_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx): @@ -194,7 +192,7 @@ def exp_manager(trainer: 'pytorch_lightning.Trainer', cfg: Optional[Union[DictCo lightning's TensorboardLogger system of using version_{int}. - use_datetime_version (bool): Whether to use a datetime string for version. Defaults to True. - resume_if_exists (bool): Whether this experiment is resuming from a previous run. If True, it sets - trainer.checkpoint_connector.resume_from_checkpoint_fit_path so that the trainer should auto-resume. exp_manager will move files + trainer._checkpoint_connector.resume_from_checkpoint_fit_path so that the trainer should auto-resume. exp_manager will move files under log_dir to log_dir/run_{int}. Defaults to False. From v1.0.0, when resume_if_exists is True, we would not create version folders to make it easier to find the log folder for next runs. - resume_past_end (bool): exp_manager errors out if resume_if_exists is True and a checkpoint matching @@ -371,7 +369,7 @@ def error_checks(trainer: 'pytorch_lightning.Trainer', cfg: Optional[Union[DictC "You are running multi-node training without SLURM handling the processes." " Please note that this is not tested in NeMo and could result in errors." ) - if trainer.num_gpus > 1 and not isinstance(trainer.accelerator.training_type_plugin, DDPPlugin): + if trainer.num_gpus > 1 and not isinstance(trainer.strategy, DDPStrategy): logging.error( "You are running multi-gpu without ddp.Please note that this is not tested in NeMo and could result in " "errors." @@ -385,7 +383,7 @@ def check_resume( resume_ignore_no_checkpoint: bool = False, ): """Checks that resume=True was used correctly with the arguments pass to exp_manager. Sets - trainer.checkpoint_connector.resume_from_checkpoint_fit_path as necessary. + trainer._checkpoint_connector.resume_from_checkpoint_fit_path as necessary. Returns: log_dir (Path): the log_dir @@ -433,15 +431,16 @@ def check_resume( else: raise NotFoundError(f"There were no checkpoints found in {checkpoint_dir}. 
Cannot resume.") elif len(last_checkpoints) > 1: - if 'mp_rank' in str(last_checkpoints[0]): + if 'mp_rank' in str(last_checkpoints[0]) or 'tp_rank' in str(last_checkpoints[0]): checkpoint = last_checkpoints[0] + checkpoint = uninject_model_parallel_rank(checkpoint) else: raise ValueError(f"Multiple checkpoints {last_checkpoints} that matches *last.ckpt.") else: logging.info(f"Resuming from {last_checkpoints[0]}") checkpoint = last_checkpoints[0] - trainer.checkpoint_connector.resume_from_checkpoint_fit_path = str(checkpoint) + trainer._checkpoint_connector.resume_from_checkpoint_fit_path = str(checkpoint) if is_global_rank_zero(): # Check to see if any files exist that need to be moved @@ -660,7 +659,7 @@ def configure_loggers( logger_list = ( LoggerList(logger_list, nemo_name=name, nemo_version=version) if len(logger_list) > 1 else logger_list[0] ) - trainer.logger_connector.configure_logger(logger_list) + trainer._logger_connector.configure_logger(logger_list) class NeMoModelCheckpoint(ModelCheckpoint): @@ -720,8 +719,8 @@ def nemo_topk_check_previous_run(self): checkpoints = list(Path(self.dirpath).rglob("*.ckpt")) for checkpoint in checkpoints: - if self.model_parallel_size is not None and self.model_parallel_size > 1: - checkpoint = self._uninject_mp_rank(checkpoint) + if 'mp_rank' in str(checkpoint) or 'tp_rank' in str(checkpoint): + checkpoint = uninject_model_parallel_rank(checkpoint) checkpoint = str(checkpoint) if checkpoint[-10:] == '-last.ckpt': continue @@ -755,25 +754,6 @@ def nemo_topk_check_previous_run(self): self.best_model_path = best_k_models[0] self.best_model_score = self.best_k_models[self.best_model_path] - @staticmethod - def _uninject_mp_rank(filepath): - dirname = os.path.dirname(os.path.dirname(filepath)) - basename = os.path.basename(filepath) - filepath = os.path.join(dirname, basename) - return filepath - - # TODO remove _save_last_checkpoint after fix for issue #https://github.com/PyTorchLightning/pytorch-lightning/issues/11451 - def _save_last_checkpoint(self, trainer, monitor_candidates) -> None: - if not self.save_last: - return - - filepath = self.format_checkpoint_name(monitor_candidates, self.CHECKPOINT_NAME_LAST) - if self.last_model_path and self.last_model_path != filepath: - trainer.training_type_plugin.remove_checkpoint(self.last_model_path) - - self.last_model_path = filepath - trainer.save_checkpoint(filepath, self.save_weights_only) - def on_save_checkpoint(self, trainer, pl_module, checkpoint): # output = None output = super().on_save_checkpoint(trainer, pl_module, checkpoint) @@ -817,13 +797,15 @@ def on_train_end(self, trainer, pl_module): # Load the best model and then re-save it if self.save_best_model: + # wait for all processes + trainer.training_type_plugin.barrier("SaveBestCheckpointConnector.resume_end") if self.best_model_path == "": logging.warning( f"{self} was told to save the best checkpoint at the end of training, but no saved checkpoints " "were found. Saving latest model instead." 
) else: - trainer.checkpoint_connector.restore(self.best_model_path) + trainer._checkpoint_connector.restore(self.best_model_path) if self.save_nemo_on_train_end: pl_module.save_to(save_path=os.path.join(self.dirpath, self.prefix + self.postfix)) @@ -832,9 +814,7 @@ def _del_model_without_trainer(self, filepath: str) -> None: app_state = AppState() if app_state.model_parallel_size is not None and app_state.model_parallel_size > 1: # filepath needs to be updated to include mp_rank - dirname = os.path.dirname(filepath) - basename = os.path.basename(filepath) - filepath = f'{dirname}/mp_rank_{app_state.model_parallel_rank:02d}/{basename}' + filepath = inject_model_parallel_rank(filepath) # each model parallel rank needs to remove its model if is_global_rank_zero() or (app_state.model_parallel_size is not None and app_state.data_parallel_rank == 0): @@ -905,11 +885,9 @@ def configure_checkpointing( ) checkpoint_callback = NeMoModelCheckpoint(n_resume=resume, **params) - checkpoint_callback.last_model_path = trainer.checkpoint_connector.resume_from_checkpoint_fit_path or "" - if params.model_parallel_size is not None and params.model_parallel_size > 1: - checkpoint_callback.last_model_path = NeMoModelCheckpoint._uninject_mp_rank( - checkpoint_callback.last_model_path - ) + checkpoint_callback.last_model_path = trainer._checkpoint_connector.resume_from_checkpoint_fit_path or "" + if 'mp_rank' in checkpoint_callback.last_model_path or 'tp_rank' in checkpoint_callback.last_model_path: + checkpoint_callback.last_model_path = uninject_model_parallel_rank(checkpoint_callback.last_model_path) trainer.callbacks.append(checkpoint_callback) @@ -926,24 +904,9 @@ class StatelessTimer(Timer): def __init__(self, duration: timedelta = None, interval: str = Interval.step, verbose: bool = True,) -> None: super().__init__(duration, interval, verbose) - def on_save_checkpoint(self, trainer, pl_module, checkpoint) -> Dict[str, Any]: - return + # Override PTL Timer's state dict to not store elapsed time information so that we can restore and continue training. + def state_dict(self) -> Dict[str, Any]: + return {} - def on_load_checkpoint(self, trainer, pl_module, callback_state) -> None: - return - - def _check_time_remaining(self, trainer) -> None: - # Default timer only checks for train time exceeding max_time, this includes time for all stages. - train_duration = self.time_elapsed(RunningStage.TRAINING) - validation_duration = self.time_elapsed(RunningStage.VALIDATING) - test_duration = self.time_elapsed(RunningStage.TESTING) - total_duration = train_duration + validation_duration + test_duration - should_stop = total_duration >= self._duration - # should_stop = trainer.training_type_plugin.broadcast(should_stop) - should_stop = trainer.training_type_plugin.reduce_boolean_decision(should_stop) - trainer.should_stop = trainer.should_stop or should_stop - if should_stop and self._verbose: - rank_zero_info(f"Time limit reached. 
Signaling Trainer to stop.") - rank_zero_info( - f"Spent {timedelta(seconds=train_duration)} seconds on training, {timedelta(seconds=validation_duration)} seconds on validation and {timedelta(seconds=test_duration)} seconds on testing" - ) + def load_state_dict(self, state_dict: Dict[str, Any]) -> None: + return \ No newline at end of file diff --git a/tutorials/tts/Inference_ModelSelect.ipynb b/tutorials/tts/Inference_ModelSelect.ipynb index 30daf755ba94..e82dcbdbc860 100644 --- a/tutorials/tts/Inference_ModelSelect.ipynb +++ b/tutorials/tts/Inference_ModelSelect.ipynb @@ -46,11 +46,11 @@ "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", "4. Run this cell to set up dependencies.\n", "\"\"\"\n", + "BRANCH = 'main'\n", "# # If you're using Google Colab and not running locally, uncomment and run this cell.\n", "# !apt-get install sox libsndfile1 ffmpeg\n", "# !pip install wget unidecode\n", - "# BRANCH = 'main'\n", - "# !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[tts]" + "# !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]" ] }, { @@ -231,9 +231,6 @@ " \"mixerttsx\": \"tts_en_lj_hifigan_ft_mixerttsx\"\n", " }\n", " pretrained_model = spectrogram_generator2ft_hifigan.get(spectrogram_generator, \"tts_hifigan\")\n", - " elif audio_generator == \"univnet\":\n", - " from nemo.collections.tts.models import UnivNetModel\n", - " pretrained_model = \"tts_en_lj_univnet\"\n", " elif audio_generator == \"griffin-lim\":\n", " from nemo.collections.tts.models import TwoStagesModel\n", " cfg = {'linvocoder': {'_target_': 'nemo.collections.tts.models.two_stages.GriffinLimModel',\n", @@ -410,4 +407,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} From 38a93b4a58a5139dc7bef57cb29af6e99d6bbeb8 Mon Sep 17 00:00:00 2001 From: treacker Date: Fri, 8 Apr 2022 06:01:27 -0700 Subject: [PATCH 085/244] plt fix --- examples/tts/conf/vits.yaml | 14 ++++--- examples/tts/vits.py | 2 +- nemo/collections/tts/torch/tts_tokenizers.py | 7 +++- nemo/utils/model_utils.py | 44 +++++++++++++++++++- requirements/requirements_lightning.txt | 2 +- 5 files changed, 59 insertions(+), 10 deletions(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index 447bc26e53f6..7ea922313418 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -6,14 +6,16 @@ name: VITS +# train_dataset: "../datasets/ljspeech_ds/LJSpeech-1.1/train_manifest.json" +train_dataset: "raid/datasets/tts_data/train_manifest.json" -train_dataset: "../datasets/ljspeech_ds/LJSpeech-1.1/train_manifest.json" -validation_datasets: "../datasets/ljspeech_ds/LJSpeech-1.1/val_manifest.json" +# validation_datasets: "../datasets/ljspeech_ds/LJSpeech-1.1/val_manifest.json" +validation_datasets: "raid/datasets/tts_data/val_manifest.json" sup_data_path: null sup_data_types: null -checkpoint_path: 'vits_full/VITS/2022-03-25_17-41-47/checkpoints/VITS--loss_gen_all\=37.8252-epoch\=7719-last.ckpt' -# checkpoint_path: 'checkpoints/vits-epoch=7719.ckpt' +# checkpoint_path: 'vits_full/VITS/2022-03-25_17-41-47/checkpoints/VITS--loss_gen_all\=37.8252-epoch\=7719-last.ckpt' +checkpoint_path: 'checkpoint' honeme_dict_path: "scripts/tts_dataset_files/cmudict-0.7b_nv22.01" heteronyms_path: "scripts/tts_dataset_files/heteronyms-030921" @@ -203,9 +205,9 @@ trainer: # gradient_clip_val: 1000.0 checkpoint_callback: false # Provided by exp_manager logger: false # Provided by 
exp_manager
-  log_every_n_steps: 200
+  log_every_n_steps: 199
   flush_logs_every_n_steps: 1000
-  check_val_every_n_epoch: 5
+  check_val_every_n_epoch: 1
 
 exp_manager:
   exp_dir: ../exps/vits_full2
diff --git a/examples/tts/vits.py b/examples/tts/vits.py
index 2ae1cde63648..c2ec3a87e503 100644
--- a/examples/tts/vits.py
+++ b/examples/tts/vits.py
@@ -34,7 +34,7 @@ def main(cfg):
     exp_manager(trainer, cfg.get("exp_manager", None))
     model = VitsModel(cfg=cfg.model, trainer=trainer)
     if cfg.checkpoint_path is not None:
-        model.load_from_checkpoint(cfg.checkpoint_path)
+        model = VitsModel.load_from_checkpoint(cfg.checkpoint_path)
 
     trainer.callbacks.extend([pl.callbacks.LearningRateMonitor(), LogEpochTimeCallback()])
     trainer.fit(model)
diff --git a/nemo/collections/tts/torch/tts_tokenizers.py b/nemo/collections/tts/torch/tts_tokenizers.py
index 337dc882e2cb..677de052e7f4 100644
--- a/nemo/collections/tts/torch/tts_tokenizers.py
+++ b/nemo/collections/tts/torch/tts_tokenizers.py
@@ -283,6 +283,9 @@ def __init__(
             Basically, it replaces all non-unicode characters with unicode ones.
             Note that lower() function shouldn't be applied here, because text can contain phonemes (it will be handled by g2p).
         """
+        self.phoneme_probability = None
+        if hasattr(g2p, "phoneme_probability"):
+            self.phoneme_probability = g2p.phoneme_probability
         tokens = []
         self.space, tokens = len(tokens), tokens + [space]  # Space
@@ -402,7 +405,9 @@ def __init__(
             Basically, it replaces all non-unicode characters with unicode ones.
             Note that lower() function shouldn't be applied here, because text can contain phonemes (it will be handled by g2p).
         """
-
+        self.phoneme_probability = None
+        if hasattr(g2p, "phoneme_probability"):
+            self.phoneme_probability = g2p.phoneme_probability
         tokens = []
         self.space, tokens = len(tokens), tokens + [space]  # Space
diff --git a/nemo/utils/model_utils.py b/nemo/utils/model_utils.py
index deebfe1b34f7..6196aa60f0c3 100644
--- a/nemo/utils/model_utils.py
+++ b/nemo/utils/model_utils.py
@@ -23,7 +23,7 @@
 import nemo
 from nemo import constants
-from nemo.utils import logging
+from nemo.utils import AppState, logging
 
 
 # TODO @blisc: Perhaps refactor instead of import guarding
@@ -175,6 +175,15 @@ def parse_dataset_as_name(name: str) -> str:
     if 'dataset' in name:
         name = name.replace('dataset', '')
 
+    # Test if the manifest/dataset name was simply `manifest.yaml` or `dataset.yaml`: Invalid names.
+    if name == '':
+        raise ValueError(
+            "Provided dataset / manifest filename was `manifest.json` or `dataset.json`.\n"
+            "Such a name is invalid, since multiple datasets/manifests can share the same name,\n"
+            "thereby overriding their results during logging. Please pick a more descriptive filename \n"
+            "for the provided dataset / manifest file."
+        )
+
     if '_' != name[-1]:
         name = name + '_'
 
@@ -573,3 +582,36 @@ def resolve_cache_dir() -> Path:
     else:
         path = Path(override_dir).resolve()
     return path
+
+
+def uninject_model_parallel_rank(filepath):
+    filepath = str(filepath)
+    if 'mp_rank' in filepath or 'tp_rank' in filepath:
+        dirname = os.path.dirname(os.path.dirname(filepath))
+        basename = os.path.basename(filepath)
+        filepath = os.path.join(dirname, basename)
+        return filepath
+    else:
+        return filepath
+
+
+def inject_model_parallel_rank(filepath):
+    """
+    Injects tensor/pipeline model parallel ranks into the filepath.
+    Does nothing if not using model parallelism.
+    """
+    # first make sure filepath does not have rank
+    filepath = uninject_model_parallel_rank(filepath)
+
+    app_state = AppState()
+    if app_state.model_parallel_size is not None and app_state.model_parallel_size > 1:
+        # filepath needs to be updated to include mp_rank
+        dirname = os.path.dirname(filepath)
+        basename = os.path.basename(filepath)
+        if app_state.pipeline_model_parallel_size is None or app_state.pipeline_model_parallel_size == 1:
+            filepath = f'{dirname}/mp_rank_{app_state.tensor_model_parallel_rank:02d}/{basename}'
+        else:
+            filepath = f'{dirname}/tp_rank_{app_state.tensor_model_parallel_rank:02d}_pp_rank_{app_state.pipeline_model_parallel_rank:03d}/{basename}'
+        return filepath
+    else:
+        return filepath
\ No newline at end of file
diff --git a/requirements/requirements_lightning.txt b/requirements/requirements_lightning.txt
index 33cbdfb44f64..68cabfa7f2b3 100644
--- a/requirements/requirements_lightning.txt
+++ b/requirements/requirements_lightning.txt
@@ -1,4 +1,4 @@
-pytorch-lightning>=1.5.9
+pytorch-lightning>=1.6.0
 torchmetrics>=0.4.1rc0
 transformers>=4.0.1
 webdataset>=0.1.48,<=0.1.62
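The uninject/inject pair added above canonicalizes checkpoint paths across model-parallel ranks, and its effect is easiest to see on concrete paths. The snippet below re-runs the uninject logic exactly as written in the patch on made-up checkpoint names (the paths are illustrative only):

import os

def uninject_model_parallel_rank(filepath):
    # Same logic as the patch: drop the per-rank directory from a checkpoint path.
    filepath = str(filepath)
    if 'mp_rank' in filepath or 'tp_rank' in filepath:
        dirname = os.path.dirname(os.path.dirname(filepath))
        basename = os.path.basename(filepath)
        filepath = os.path.join(dirname, basename)
    return filepath

# Tensor-parallel-only layout collapses to the shared path:
print(uninject_model_parallel_rank('exp/checkpoints/mp_rank_00/model--last.ckpt'))
# -> exp/checkpoints/model--last.ckpt

# Tensor + pipeline parallel layout collapses the same way:
print(uninject_model_parallel_rank('exp/checkpoints/tp_rank_00_pp_rank_001/model--last.ckpt'))
# -> exp/checkpoints/model--last.ckpt

# Non-parallel checkpoints pass through untouched:
print(uninject_model_parallel_rank('exp/checkpoints/model--last.ckpt'))
# -> exp/checkpoints/model--last.ckpt

inject_model_parallel_rank is the inverse direction: it first strips any rank directory, then re-inserts the one matching the current process's tensor/pipeline ranks from AppState.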
From 0f50fce026795abbc4808cee4f1760ffd90b8b21 Mon Sep 17 00:00:00 2001
From: treacker
Date: Sat, 9 Apr 2022 12:52:32 -0700
Subject: [PATCH 086/244] fix logging

---
 examples/tts/conf/vits.yaml         |  9 +++--
 nemo/collections/tts/models/vits.py | 59 +++++++++++++++--------------
 2 files changed, 36 insertions(+), 32 deletions(-)

diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml
index 7ea922313418..df530dc71127 100644
--- a/examples/tts/conf/vits.yaml
+++ b/examples/tts/conf/vits.yaml
@@ -16,8 +16,9 @@ sup_data_types: null
 
 # checkpoint_path: 'vits_full/VITS/2022-03-25_17-41-47/checkpoints/VITS--loss_gen_all\=37.8252-epoch\=7719-last.ckpt'
 checkpoint_path: 'checkpoint'
+# checkpoint_path: null
 
-honeme_dict_path: "scripts/tts_dataset_files/cmudict-0.7b_nv22.01"
+phoneme_dict_path: "scripts/tts_dataset_files/cmudict-0.7b_nv22.01"
 heteronyms_path: "scripts/tts_dataset_files/heteronyms-030921"
 whitelist_path: "nemo_text_processing/text_normalization/en/data/whitelist_lj_speech.tsv"
@@ -205,12 +206,12 @@ trainer:
   # gradient_clip_val: 1000.0
   checkpoint_callback: false # Provided by exp_manager
   logger: false # Provided by exp_manager
-  log_every_n_steps: 199
+  log_every_n_steps: 50
   flush_logs_every_n_steps: 1000
   check_val_every_n_epoch: 1
 
 exp_manager:
-  exp_dir: ../exps/vits_full2
+  exp_dir: ../exps/vits_full2_test
   name: ${name}
   create_tensorboard_logger: false
   create_checkpoint_callback: true
@@ -220,7 +221,7 @@ exp_manager:
     mode: min
   create_wandb_logger: true
   wandb_logger_kwargs:
-    name: vits_full2
+    name: vits_full2_test
     project: ${name}
     entity: nvidia
   resume_if_exists: false
diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py
index 7e7e31796b4d..4e6aae1b9847 100644
--- a/nemo/collections/tts/models/vits.py
+++ b/nemo/collections/tts/models/vits.py
@@ -308,34 +308,37 @@ def validation_step(self, batch, batch_idx):
         y_hat_mel, y_hat_mel_lengths = self.audio_to_melspec_precessor(y_hat, y_hat_lengths)
 
         # plot audio once per epoch
-        if batch_idx == 0 and self.logger is not None and isinstance(self.logger, WandbLogger):
-            specs = []
-            audios = []
-
-            specs += [
-                wandb.Image(
-                    plot_spectrogram_to_numpy(mel[0, :, : mel_lengths[0]].cpu().numpy()), caption=f"val_mel_target",
-                ),
-                wandb.Image(
-                    plot_spectrogram_to_numpy(y_hat_mel[0, :, : y_hat_mel_lengths[0]].cpu().numpy()),
-                    caption=f"val_mel_predicted",
-                ),
-            ]
-
-            audios += [
-                wandb.Audio(
-                    y[0, : y_lengths[0]].data.cpu().to(torch.float).numpy(),
-                    caption=f"val_wav_target",
-                    sample_rate=self.sample_rate,
-                ),
-                wandb.Audio(
-                    y_hat[0, : y_hat_lengths[0]].data.cpu().to(torch.float).numpy(),
-                    caption=f"val_wav_predicted",
-                    sample_rate=self.sample_rate,
-                ),
-            ]
-
-            self.logger.experiment.log({"specs": specs, "audios": audios})
+        if batch_idx == 0:
+            logger = self.logger.experiment
+            # print(logger, self.logger)
+            if logger is not None and isinstance(self.logger, WandbLogger):
+                specs = []
+                audios = []
+
+                specs += [
+                    wandb.Image(
+                        plot_spectrogram_to_numpy(mel[0, :, : mel_lengths[0]].cpu().numpy()), caption=f"val_mel_target",
+                    ),
+                    wandb.Image(
+                        plot_spectrogram_to_numpy(y_hat_mel[0, :, : y_hat_mel_lengths[0]].cpu().numpy()),
+                        caption=f"val_mel_predicted",
+                    ),
+                ]
+
+                audios += [
+                    wandb.Audio(
+                        y[0, : y_lengths[0]].data.cpu().to(torch.float).numpy(),
+                        caption=f"val_wav_target",
+                        sample_rate=self.sample_rate,
+                    ),
+                    wandb.Audio(
+                        y_hat[0, : y_hat_lengths[0]].data.cpu().to(torch.float).numpy(),
+                        caption=f"val_wav_predicted",
+                        sample_rate=self.sample_rate,
+                    ),
+                ]
+
+                logger.log({"specs": specs, "audios": audios})
 
     def _loader(self, cfg):
         try:

From e312e961428e7c8d1b20086433060e272a0103d1 Mon Sep 17 00:00:00 2001
From: treacker
Date: Sun, 10 Apr 2022 04:42:46 -0700
Subject: [PATCH 087/244] fix checkpoint loading

---
 examples/tts/conf/vits.yaml | 4 ++--
 examples/tts/vits.py        | 7 +++----
 2 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml
index df530dc71127..5fa34321b703 100644
--- a/examples/tts/conf/vits.yaml
+++ b/examples/tts/conf/vits.yaml
@@ -211,7 +211,7 @@ trainer:
   check_val_every_n_epoch: 1
 
 exp_manager:
-  exp_dir: ../exps/vits_full2_test
+  exp_dir: ../exps/vits_full2_logs
   name: ${name}
   create_tensorboard_logger: false
   create_checkpoint_callback: true
@@ -220,7 +220,7 @@ exp_manager:
     mode: min
   create_wandb_logger: true
   wandb_logger_kwargs:
-    name: vits_full2_test
+    name: vits_full2_fixed
     project: ${name}
     entity: nvidia
   resume_if_exists: false
diff --git a/examples/tts/vits.py b/examples/tts/vits.py
index c2ec3a87e503..719f599a7445 100644
--- a/examples/tts/vits.py
+++ b/examples/tts/vits.py
@@ -29,12 +29,11 @@ def main(cfg):
         scaler = GradScaler(enabled=True)
         plugins.append(NativeMixedPrecisionPlugin(precision=cfg.trainer.precision, device='cuda', scaler=scaler))
 
-    trainer = pl.Trainer(plugins=plugins, replace_sampler_ddp=False, **cfg.trainer)
-    # trainer = pl.Trainer(plugins=plugins, **cfg.trainer)
+    trainer = pl.Trainer(resume_from_checkpoint=cfg.checkpoint_path, plugins=plugins, replace_sampler_ddp=False, **cfg.trainer)
+    # trainer = pl.Trainer(plugins=plugins, **cfg.trainer)
     exp_manager(trainer, cfg.get("exp_manager", None))
     model = VitsModel(cfg=cfg.model, trainer=trainer)
-    if cfg.checkpoint_path is not None:
-        model = VitsModel.load_from_checkpoint(cfg.checkpoint_path)
+
     trainer.callbacks.extend([pl.callbacks.LearningRateMonitor(), LogEpochTimeCallback()])
     trainer.fit(model)

From fe3901a206bc241510322764567f314f8ba705e5 Mon Sep 17 00:00:00 2001
From: treacker
Date: Wed, 20 Apr 2022 09:17:08 -0700
Subject: [PATCH 088/244] refactored inference

---
 nemo/collections/tts/models/vits.py          | 12 ++++++------
 nemo/collections/tts/modules/vits_modules.py |  1 -
 2 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py
index 4e6aae1b9847..a8eef40c9dd2 100644
--- a/nemo/collections/tts/models/vits.py
+++ b/nemo/collections/tts/models/vits.py
@@ -175,17 +175,17 @@ def configure_optimizers(self):
         return [optim_g, optim_d], [scheduler_g_dict, scheduler_d_dict]
 
     # only for inference
-    def forward(self, batch, batch_idx):
+    def forward(self, batch, batch_idx, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None):
         with torch.no_grad():
-            (x, x_lengths, spec, spec_lengths, y, y_lengths) = batch
-
+            (y, y_lengths, x, x_lengths) = batch
             # remove else
             x = x[:1]
             x_lengths = x_lengths[:1]
 
-            y_hat, attn, mask, *_ = self.net_g.module.infer(x, x_lengths, max_len=1000)
-            y_hat_lengths = mask.sum([1, 2]).long() * self._cfg.hop_size
-            return y_hat, y_hat_lengths
+            y_hat, attn, mask, (z, z_p, m_p, logs_p) = self.net_g.infer(x, x_lengths, sid=sid, noise_scale=noise_scale,
+                                                                        length_scale=length_scale, noise_scale_w=noise_scale_w, max_len=1000)
+            y_hat_lengths = mask.sum([1, 2]).long() * self._cfg.n_window_stride
+            return y_hat, y_hat_lengths, (z, z_p, m_p, logs_p)
 
     def get_spec(self, audio):
         with torch.cuda.amp.autocast(enabled=False):
diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py
index 3aa6edd4f01f..30275cc49ed6 100644
--- a/nemo/collections/tts/modules/vits_modules.py
+++ b/nemo/collections/tts/modules/vits_modules.py
@@ -575,7 +575,6 @@ def __init__(self,
         self.kernel_size = kernel_size
         self.p_dropout = p_dropout
 
-        # TODO: add padding idx in __init__, specify padding idx in self.emb
         self.emb = nn.Embedding(n_vocab, hidden_channels, padding_idx=padding_idx)
         nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5)
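The refactored forward exposes VITS's sampling controls directly instead of hard-coding them. A usage sketch, assuming a trained VitsModel instance and a batch shaped like the tuple unpacked above; the scale values are the defaults commonly used by the original VITS implementation, not values taken from this PR:

import torch

# `model` is a trained VitsModel; `batch` is (audio, audio_lengths, tokens, token_lengths).
model.eval()
with torch.no_grad():
    y_hat, y_hat_lengths, (z, z_p, m_p, logs_p) = model(
        batch,
        batch_idx=0,
        noise_scale=0.667,  # temperature of prior sampling: lower -> flatter, more stable prosody
        length_scale=1.0,   # stretches predicted durations: >1 slower speech, <1 faster
        noise_scale_w=0.8,  # noise fed to the stochastic duration predictor
    )
# y_hat holds the synthesized waveforms; y_hat_lengths is in samples
# (mask frames multiplied by n_window_stride, per the code above).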
From 5e38eba27a2f376e967d138817af06dca4d892b8 Mon Sep 17 00:00:00 2001
From: treacker
Date: Thu, 21 Apr 2022 01:40:33 -0700
Subject: [PATCH 089/244] fp32 run

---
 examples/tts/conf/vits.yaml | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml
index 5fa34321b703..575050d80e89 100644
--- a/examples/tts/conf/vits.yaml
+++ b/examples/tts/conf/vits.yaml
@@ -15,8 +15,8 @@ sup_data_path: null
 sup_data_types: null
 
 # checkpoint_path: 'vits_full/VITS/2022-03-25_17-41-47/checkpoints/VITS--loss_gen_all\=37.8252-epoch\=7719-last.ckpt'
-checkpoint_path: 'checkpoint'
-# checkpoint_path: null
+# checkpoint_path: 'checkpoint'
+checkpoint_path: null
 
 phoneme_dict_path: "scripts/tts_dataset_files/cmudict-0.7b_nv22.01"
 heteronyms_path: "scripts/tts_dataset_files/heteronyms-030921"
@@ -200,7 +200,7 @@ trainer:
   devices: 2
   accelerator: gpu
   strategy: ddp
-  precision: 16
+  precision: 32
   max_epochs: 1000000
   accumulate_grad_batches: 1
   # gradient_clip_val: 1000.0
@@ -211,7 +211,7 @@ trainer:
   check_val_every_n_epoch: 1
 
 exp_manager:
-  exp_dir: ../exps/vits_full2_logs
+  exp_dir: ../exps/vits_fp32
   name: ${name}
   create_tensorboard_logger: false
   create_checkpoint_callback: true
@@ -220,7 +220,7 @@ exp_manager:
     mode: min
   create_wandb_logger: true
   wandb_logger_kwargs:
-    name: vits_full2_fixed
+    name: vits_fp32
     project: ${name}
     entity: nvidia
   resume_if_exists: false

From 47741d4a3a7cdb1fc5f75cbbf6f329467fbc1ad0 Mon Sep 17 00:00:00 2001
From: ericharper
Date: Sun, 1 May 2022 23:37:22 -0600
Subject: [PATCH 090/244] update branch

Signed-off-by: ericharper

---
 Jenkinsfile                                   | 222 +++++++++---------
 tutorials/00_NeMo_Primer.ipynb                |   2 +-
 tutorials/01_NeMo_Models.ipynb                |   2 +-
 tutorials/AudioTranslationSample.ipynb        |   2 +-
 tutorials/VoiceSwapSample.ipynb               |   2 +-
 .../asr/ASR_CTC_Language_Finetuning.ipynb     |   2 +-
 tutorials/asr/ASR_for_telephony_speech.ipynb  |   2 +-
tutorials/asr/ASR_with_NeMo.ipynb | 2 +- .../asr/ASR_with_Subword_Tokenization.ipynb | 2 +- tutorials/asr/ASR_with_Transducers.ipynb | 2 +- .../asr/Buffered_Transducer_Inference.ipynb | 2 +- ..._Transducer_Inference_with_LCS_Merge.ipynb | 2 +- tutorials/asr/Intro_to_Transducers.ipynb | 2 +- tutorials/asr/Offline_ASR.ipynb | 2 +- .../Offline_ASR_with_VAD_for_CTC_models.ipynb | 2 +- .../asr/Online_ASR_Microphone_Demo.ipynb | 2 +- tutorials/asr/Online_Noise_Augmentation.ipynb | 2 +- .../Online_Offline_Microphone_VAD_Demo.ipynb | 2 +- .../Online_Offline_Speech_Commands_Demo.ipynb | 2 +- .../asr/Self_Supervised_Pre_Training.ipynb | 2 +- tutorials/asr/Speech_Commands.ipynb | 2 +- tutorials/asr/Streaming_ASR.ipynb | 2 +- tutorials/asr/Voice_Activity_Detection.ipynb | 2 +- ...Language_Models_for_Downstream_Tasks.ipynb | 2 +- tutorials/nlp/02_NLP_Tokenizers.ipynb | 4 +- ...a_Preprocessing_and_Cleaning_for_NMT.ipynb | 2 +- tutorials/nlp/Entity_Linking_Medical.ipynb | 2 +- tutorials/nlp/GLUE_Benchmark.ipynb | 2 +- ...Joint_Intent_and_Slot_Classification.ipynb | 2 +- tutorials/nlp/MegatronBert_export.ipynb | 4 +- ...on_Synthetic_Tabular_Data_Generation.ipynb | 2 +- .../Non_English_Downstream_Tasks_(NER).ipynb | 2 +- tutorials/nlp/PTune_multiple_NLP_tasks.ipynb | 2 +- .../nlp/Punctuation_and_Capitalization.ipynb | 2 +- tutorials/nlp/Question_Answering_Squad.ipynb | 2 +- .../nlp/Relation_Extraction-BioMegatron.ipynb | 2 +- tutorials/nlp/Text2Sparql.ipynb | 4 +- ...xt_Classification_Sentiment_Analysis.ipynb | 2 +- .../Token_Classification-BioMegatron.ipynb | 2 +- ...ssification_Named_Entity_Recognition.ipynb | 4 +- .../nlp/Zero_Shot_Intent_Recognition.ipynb | 2 +- .../ASR_with_SpeakerDiarization.ipynb | 2 +- .../Speaker_Diarization_Inference.ipynb | 2 +- .../Speaker_Identification_Verification.ipynb | 2 +- .../Inverse_Text_Normalization.ipynb | 2 +- .../text_processing/Text_Normalization.ipynb | 2 +- tutorials/text_processing/WFST_Tutorial.ipynb | 2 +- .../tools/CTC_Segmentation_Tutorial.ipynb | 2 +- tutorials/tts/FastPitch_Finetuning.ipynb | 4 +- .../tts/FastPitch_MixerTTS_Training.ipynb | 4 +- .../tts/Inference_DurationPitchControl.ipynb | 2 +- tutorials/tts/Inference_ModelSelect.ipynb | 2 +- tutorials/tts/Tacotron2_Training.ipynb | 2 +- tutorials/tts/TalkNet_Training.ipynb | 2 +- 54 files changed, 170 insertions(+), 170 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 1d5377b708b6..ea84290171e8 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -47,8 +47,8 @@ pipeline { // stage('Torch TTS unit tests') { // when { // anyOf { - // branch 'main' - // changeRequest target: 'main' + // branch 'r1.9.0' + // changeRequest target: 'r1.9.0' // } // } // steps { @@ -104,8 +104,8 @@ pipeline { stage('L0: Unit Tests CPU') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } steps { @@ -116,8 +116,8 @@ pipeline { stage('L0: TN/ITN Tests CPU') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -144,8 +144,8 @@ pipeline { stage('L2: NeMo text processing') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -196,8 +196,8 @@ pipeline { stage('L0: Computer Vision Integration') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -224,8 +224,8 @@ pipeline { // stage('L0: Integration Tests CPU') { // 
when { // anyOf{ - // branch 'main' - // changeRequest target: 'main' + // branch 'r1.9.0' + // changeRequest target: 'r1.9.0' // } // } // steps { @@ -244,7 +244,7 @@ pipeline { // when { // anyOf{ // branch 'dev - // changeRequest target: 'main' + // changeRequest target: 'r1.9.0' // } // } // steps { @@ -255,8 +255,8 @@ pipeline { stage('L2: ASR dev run') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -327,8 +327,8 @@ pipeline { stage('L2: Speaker dev run') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -407,8 +407,8 @@ pipeline { // stage('L2: ASR DALI dev run') { // when { // anyOf { - // branch 'main' - // changeRequest target: 'main' + // branch 'r1.9.0' + // changeRequest target: 'r1.9.0' // } // } // failFast true @@ -475,8 +475,8 @@ pipeline { // stage('L2: ASR RNNT dev run') { // when { // anyOf { - // branch 'main' - // changeRequest target: 'main' + // branch 'r1.9.0' + // changeRequest target: 'r1.9.0' // } // } // failFast true @@ -519,8 +519,8 @@ pipeline { stage('L2: ASR Multi-dataloader dev run') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -567,8 +567,8 @@ pipeline { stage('L2: ASR Adapters') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -596,8 +596,8 @@ pipeline { stage('L2: Speech Transcription') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -618,8 +618,8 @@ pipeline { stage('L2: Segmentation Tool') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } stages { @@ -675,8 +675,8 @@ pipeline { // stage('L2: Multi-GPU Megatron finetuning') { // when { // anyOf { - // branch 'main' - // changeRequest target: 'main' + // branch 'r1.9.0' + // changeRequest target: 'r1.9.0' // } // } // failFast true @@ -702,8 +702,8 @@ pipeline { stage('L2: STS-b') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -756,8 +756,8 @@ pipeline { stage('L2: Dialogue Classification') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -927,8 +927,8 @@ pipeline { stage('L2: Dialogue Generation') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -993,8 +993,8 @@ pipeline { stage('L2: Dialogue Generation Part 2') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -1023,8 +1023,8 @@ pipeline { stage('L2: Parallel BERT SQUAD v1.1 / v2.0') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -1131,8 +1131,8 @@ pipeline { // stage('L2: MegaBERT Token Classification') { // when { // anyOf { - // branch 'main' - // changeRequest target: 'main' + // branch 'r1.9.0' + // changeRequest target: 'r1.9.0' // } // } // failFast true @@ -1157,8 +1157,8 @@ pipeline { stage('L2: Parallel SQUAD v1.1 & v2.0') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + 
changeRequest target: 'r1.9.0' } } failFast true @@ -1237,8 +1237,8 @@ pipeline { stage('L2: Intent and Slot Classification Tasks') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -1277,8 +1277,8 @@ pipeline { // stage('L2: Model Parallel Size 2 Megatron Text Classification') { // when { // anyOf{ - // branch 'main' - // changeRequest target: 'main' + // branch 'r1.9.0' + // changeRequest target: 'r1.9.0' // } // } // failFast true @@ -1306,8 +1306,8 @@ pipeline { // stage('L2: Model Parallel Size 2 Megatron Autoresume') { // when { // anyOf{ - // branch 'main' - // changeRequest target: 'main' + // branch 'r1.9.0' + // changeRequest target: 'r1.9.0' // } // } // failFast true @@ -1337,8 +1337,8 @@ pipeline { // stage('L2: Model Parallel Size 2 Megatron Evaluation from .nemo') { // when { // anyOf{ - // branch 'main' - // changeRequest target: 'main' + // branch 'r1.9.0' + // changeRequest target: 'r1.9.0' // } // } // failFast true @@ -1358,8 +1358,8 @@ pipeline { // stage('L2: Model Parallel Size 2 Megatron Train from .nemo') { // when { // anyOf{ - // branch 'main' - // changeRequest target: 'main' + // branch 'r1.9.0' + // changeRequest target: 'r1.9.0' // } // } // failFast true @@ -1381,8 +1381,8 @@ pipeline { stage('L2: Parallel NLP Examples 2') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -1501,8 +1501,8 @@ pipeline { stage('Punctuation & Capitalization tarred dataset') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -1558,8 +1558,8 @@ pipeline { stage('Punctuation & Capitalization inference') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -1582,8 +1582,8 @@ pipeline { stage('L2: Parallel Pretraining BERT pretraining from Text/Preprocessed') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -1644,8 +1644,8 @@ pipeline { stage('L2: Entity Linking') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -1670,8 +1670,8 @@ pipeline { stage('L2: NMT Attention is All You Need Training') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -1790,8 +1790,8 @@ pipeline { stage('L2: NMT Attention is All You Need Inference') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -1825,8 +1825,8 @@ pipeline { stage('L2: NMT with HuggingFace') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -1902,8 +1902,8 @@ pipeline { stage('L2: NMT Tarred Dataset Creation') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -1957,8 +1957,8 @@ pipeline { // stage('L2: NMT Bottleneck Fallback') { // when { // anyOf { - // branch 'main' - // changeRequest target: 'main' + // branch 'r1.9.0' + // changeRequest target: 'r1.9.0' // } // } // failFast true @@ -2004,8 +2004,8 @@ pipeline { // stage('L2: NMT Bottleneck Architecture') { // when { // anyOf { - // branch 
'main' - // changeRequest target: 'main' + // branch 'r1.9.0' + // changeRequest target: 'r1.9.0' // } // } // failFast true @@ -2087,8 +2087,8 @@ pipeline { // stage('L2: NMT Bottleneck LVM') { // when { // anyOf { - // branch 'main' - // changeRequest target: 'main' + // branch 'r1.9.0' + // changeRequest target: 'r1.9.0' // } // } // failFast true @@ -2170,8 +2170,8 @@ pipeline { stage('L2: Megatron Bert Pretraining and Resume Training') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -2240,8 +2240,8 @@ pipeline { stage('L2: BioMegatron Bert NER Task') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -2258,8 +2258,8 @@ pipeline { stage('L2: Megatron GPT Pretraining and Resume Training TP=2') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -2330,8 +2330,8 @@ pipeline { stage('L2: Megatron GPT Pretraining and Resume Training PP=2') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -2402,8 +2402,8 @@ pipeline { stage('L2: Megatron GPT Eval') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -2420,8 +2420,8 @@ pipeline { stage('L2: Megatron GPT Prompt Learning and Inference') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -2495,8 +2495,8 @@ pipeline { // stage('L2: Megatron GPT Convert from Megatron-LM checkpoing and Eval') { // when { // anyOf { - // branch 'main' - // changeRequest target: 'main' + // branch 'r1.9.0' + // changeRequest target: 'r1.9.0' // } // } // failFast true @@ -2522,8 +2522,8 @@ pipeline { stage('L2: Megatron Change Partitions') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -2561,8 +2561,8 @@ pipeline { stage('L2: Megatron T5 Pretraining and Resume Training TP=2') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -2619,8 +2619,8 @@ pipeline { stage('L2: Megatron T5 Pretraining and Resume Training PP=2') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -2677,8 +2677,8 @@ pipeline { stage('L2: Megatron T5 Eval') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -2694,8 +2694,8 @@ pipeline { stage('L2: Megatron BART Pretraining and Resume Training, TP=2') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -2749,8 +2749,8 @@ pipeline { stage('L2: Megatron BART Pretraining and Resume Training, PP=2') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -2806,8 +2806,8 @@ pipeline { stage('L2: Megatron T5 GLUE/XNLI Finetuning') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true @@ -2879,8 +2879,8 @@ pipeline { stage('L2: TTS Fast dev runs 1') { when { anyOf { - branch 
'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } parallel { @@ -2984,8 +2984,8 @@ pipeline { stage('L??: Speech Checkpoints tests') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.9.0' + changeRequest target: 'r1.9.0' } } failFast true diff --git a/tutorials/00_NeMo_Primer.ipynb b/tutorials/00_NeMo_Primer.ipynb index 761dc2748791..55fc265ad5be 100644 --- a/tutorials/00_NeMo_Primer.ipynb +++ b/tutorials/00_NeMo_Primer.ipynb @@ -65,7 +65,7 @@ "!pip install unidecode\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "## Install TorchAudio\n", diff --git a/tutorials/01_NeMo_Models.ipynb b/tutorials/01_NeMo_Models.ipynb index 639798da11e9..ca15e44fc991 100644 --- a/tutorials/01_NeMo_Models.ipynb +++ b/tutorials/01_NeMo_Models.ipynb @@ -37,7 +37,7 @@ "!pip install unidecode\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "## Install TorchAudio\n", diff --git a/tutorials/AudioTranslationSample.ipynb b/tutorials/AudioTranslationSample.ipynb index 120c486afbb9..fcfc38aa0969 100644 --- a/tutorials/AudioTranslationSample.ipynb +++ b/tutorials/AudioTranslationSample.ipynb @@ -38,7 +38,7 @@ }, "outputs": [], "source": [ - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]" ] }, diff --git a/tutorials/VoiceSwapSample.ipynb b/tutorials/VoiceSwapSample.ipynb index 647754f073e7..42deb9f584e0 100644 --- a/tutorials/VoiceSwapSample.ipynb +++ b/tutorials/VoiceSwapSample.ipynb @@ -39,7 +39,7 @@ }, "outputs": [], "source": [ - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n" ] }, diff --git a/tutorials/asr/ASR_CTC_Language_Finetuning.ipynb b/tutorials/asr/ASR_CTC_Language_Finetuning.ipynb index 9b6fd2777f05..6d49bd65d700 100644 --- a/tutorials/asr/ASR_CTC_Language_Finetuning.ipynb +++ b/tutorials/asr/ASR_CTC_Language_Finetuning.ipynb @@ -39,7 +39,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "\"\"\"\n", diff --git a/tutorials/asr/ASR_for_telephony_speech.ipynb b/tutorials/asr/ASR_for_telephony_speech.ipynb index 9b99594c2d5c..3479891abb26 100644 --- a/tutorials/asr/ASR_for_telephony_speech.ipynb +++ b/tutorials/asr/ASR_for_telephony_speech.ipynb @@ -27,7 +27,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "## Grab the config we'll use in this example\n", diff --git a/tutorials/asr/ASR_with_NeMo.ipynb b/tutorials/asr/ASR_with_NeMo.ipynb index 6f3eac25e52b..fceb298480c6 100644 --- a/tutorials/asr/ASR_with_NeMo.ipynb +++ b/tutorials/asr/ASR_with_NeMo.ipynb @@ -53,7 +53,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "## Grab the config we'll use in this example\n", diff --git 
a/tutorials/asr/ASR_with_Subword_Tokenization.ipynb b/tutorials/asr/ASR_with_Subword_Tokenization.ipynb index 09976a836a42..65710d7ab435 100644 --- a/tutorials/asr/ASR_with_Subword_Tokenization.ipynb +++ b/tutorials/asr/ASR_with_Subword_Tokenization.ipynb @@ -40,7 +40,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "## Grab the config we'll use in this example\n", diff --git a/tutorials/asr/ASR_with_Transducers.ipynb b/tutorials/asr/ASR_with_Transducers.ipynb index fbaa2ff9a725..734120649992 100644 --- a/tutorials/asr/ASR_with_Transducers.ipynb +++ b/tutorials/asr/ASR_with_Transducers.ipynb @@ -46,7 +46,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "## Grab the config we'll use in this example\n", diff --git a/tutorials/asr/Buffered_Transducer_Inference.ipynb b/tutorials/asr/Buffered_Transducer_Inference.ipynb index 41128549a7bf..a37cfc0e2aa3 100644 --- a/tutorials/asr/Buffered_Transducer_Inference.ipynb +++ b/tutorials/asr/Buffered_Transducer_Inference.ipynb @@ -45,7 +45,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "# Update numba and restart (this is required to update internal numba version of Colab)\n", diff --git a/tutorials/asr/Buffered_Transducer_Inference_with_LCS_Merge.ipynb b/tutorials/asr/Buffered_Transducer_Inference_with_LCS_Merge.ipynb index b327a98ff789..2d7755616b25 100644 --- a/tutorials/asr/Buffered_Transducer_Inference_with_LCS_Merge.ipynb +++ b/tutorials/asr/Buffered_Transducer_Inference_with_LCS_Merge.ipynb @@ -45,7 +45,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "# Update numba and restart (this is required to update internal numba version of Colab)\n", diff --git a/tutorials/asr/Intro_to_Transducers.ipynb b/tutorials/asr/Intro_to_Transducers.ipynb index cf7038a53c0c..0b4186bed3f4 100644 --- a/tutorials/asr/Intro_to_Transducers.ipynb +++ b/tutorials/asr/Intro_to_Transducers.ipynb @@ -43,7 +43,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]" ], "execution_count": null, diff --git a/tutorials/asr/Offline_ASR.ipynb b/tutorials/asr/Offline_ASR.ipynb index 6d239f70f20c..2c58697d6401 100644 --- a/tutorials/asr/Offline_ASR.ipynb +++ b/tutorials/asr/Offline_ASR.ipynb @@ -51,7 +51,7 @@ "id": "I9eIxAyKHREB" }, "source": [ - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "try:\n", " # Import NeMo Speech Recognition collection\n", " import nemo.collections.asr as nemo_asr\n", diff --git a/tutorials/asr/Offline_ASR_with_VAD_for_CTC_models.ipynb b/tutorials/asr/Offline_ASR_with_VAD_for_CTC_models.ipynb index c4b0792cce7a..af6c8ffc477e 100644 --- a/tutorials/asr/Offline_ASR_with_VAD_for_CTC_models.ipynb +++ b/tutorials/asr/Offline_ASR_with_VAD_for_CTC_models.ipynb @@ -22,7 +22,7 @@ "!pip install wget\n", "\n", "## Install NeMo\n", - "BRANCH = 
'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "\"\"\"\n", diff --git a/tutorials/asr/Online_ASR_Microphone_Demo.ipynb b/tutorials/asr/Online_ASR_Microphone_Demo.ipynb index 5df93327133f..a62fd311a81a 100644 --- a/tutorials/asr/Online_ASR_Microphone_Demo.ipynb +++ b/tutorials/asr/Online_ASR_Microphone_Demo.ipynb @@ -26,7 +26,7 @@ "!pip install pyaudio\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n", "\n", "## Install TorchAudio\n", diff --git a/tutorials/asr/Online_Noise_Augmentation.ipynb b/tutorials/asr/Online_Noise_Augmentation.ipynb index 1c8c78541dcf..da8b85b03fec 100644 --- a/tutorials/asr/Online_Noise_Augmentation.ipynb +++ b/tutorials/asr/Online_Noise_Augmentation.ipynb @@ -31,7 +31,7 @@ "!pip install unidecode\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n", "\n", "## Install TorchAudio\n", diff --git a/tutorials/asr/Online_Offline_Microphone_VAD_Demo.ipynb b/tutorials/asr/Online_Offline_Microphone_VAD_Demo.ipynb index e193774be33c..e34e30329e4e 100644 --- a/tutorials/asr/Online_Offline_Microphone_VAD_Demo.ipynb +++ b/tutorials/asr/Online_Offline_Microphone_VAD_Demo.ipynb @@ -26,7 +26,7 @@ "!pip install pyaudio\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n", "\n", "## Install TorchAudio\n", diff --git a/tutorials/asr/Online_Offline_Speech_Commands_Demo.ipynb b/tutorials/asr/Online_Offline_Speech_Commands_Demo.ipynb index db2e312f3618..1efe298afc12 100644 --- a/tutorials/asr/Online_Offline_Speech_Commands_Demo.ipynb +++ b/tutorials/asr/Online_Offline_Speech_Commands_Demo.ipynb @@ -28,7 +28,7 @@ "!pip install pyaudio\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n", "\n", "## Install TorchAudio\n", diff --git a/tutorials/asr/Self_Supervised_Pre_Training.ipynb b/tutorials/asr/Self_Supervised_Pre_Training.ipynb index a07c7a6a222e..5093893aad9a 100644 --- a/tutorials/asr/Self_Supervised_Pre_Training.ipynb +++ b/tutorials/asr/Self_Supervised_Pre_Training.ipynb @@ -27,7 +27,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "\"\"\"\n", diff --git a/tutorials/asr/Speech_Commands.ipynb b/tutorials/asr/Speech_Commands.ipynb index 1623eb572cf4..fc40552aca1c 100644 --- a/tutorials/asr/Speech_Commands.ipynb +++ b/tutorials/asr/Speech_Commands.ipynb @@ -60,7 +60,7 @@ "!pip install unidecode\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n", "\n", "## Install TorchAudio\n", diff --git a/tutorials/asr/Streaming_ASR.ipynb b/tutorials/asr/Streaming_ASR.ipynb index 436dbe335035..fee010fcea87 100644 --- a/tutorials/asr/Streaming_ASR.ipynb +++ b/tutorials/asr/Streaming_ASR.ipynb @@ -27,7 +27,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python 
-m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "## Grab the config we'll use in this example\n", diff --git a/tutorials/asr/Voice_Activity_Detection.ipynb b/tutorials/asr/Voice_Activity_Detection.ipynb index 651c4e7e096d..19a687e0b217 100644 --- a/tutorials/asr/Voice_Activity_Detection.ipynb +++ b/tutorials/asr/Voice_Activity_Detection.ipynb @@ -27,7 +27,7 @@ "!pip install unidecode\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n", "\n", "## Install TorchAudio\n", diff --git a/tutorials/nlp/01_Pretrained_Language_Models_for_Downstream_Tasks.ipynb b/tutorials/nlp/01_Pretrained_Language_Models_for_Downstream_Tasks.ipynb index faa93de12514..3dcee4bebc5e 100644 --- a/tutorials/nlp/01_Pretrained_Language_Models_for_Downstream_Tasks.ipynb +++ b/tutorials/nlp/01_Pretrained_Language_Models_for_Downstream_Tasks.ipynb @@ -26,7 +26,7 @@ "# If you're using Google Colab and not running locally, run this cell\n", "\n", "# install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]" ] }, diff --git a/tutorials/nlp/02_NLP_Tokenizers.ipynb b/tutorials/nlp/02_NLP_Tokenizers.ipynb index 61f535be36a6..7199a4c67a14 100644 --- a/tutorials/nlp/02_NLP_Tokenizers.ipynb +++ b/tutorials/nlp/02_NLP_Tokenizers.ipynb @@ -10,7 +10,7 @@ }, "outputs": [], "source": [ - "BRANCH = 'main'" + "BRANCH = 'r1.9.0'" ] }, { @@ -35,7 +35,7 @@ "# If you're using Google Colab and not running locally, run this cell\n", "\n", "# install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]" ] }, diff --git a/tutorials/nlp/Data_Preprocessing_and_Cleaning_for_NMT.ipynb b/tutorials/nlp/Data_Preprocessing_and_Cleaning_for_NMT.ipynb index c91a0adc0640..312e284ac30c 100644 --- a/tutorials/nlp/Data_Preprocessing_and_Cleaning_for_NMT.ipynb +++ b/tutorials/nlp/Data_Preprocessing_and_Cleaning_for_NMT.ipynb @@ -23,7 +23,7 @@ "!apt-get install gawk\n", "\n", "## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "!pip uninstall -y sacrebleu\n", diff --git a/tutorials/nlp/Entity_Linking_Medical.ipynb b/tutorials/nlp/Entity_Linking_Medical.ipynb index e3e51854194a..8b339745a3df 100644 --- a/tutorials/nlp/Entity_Linking_Medical.ipynb +++ b/tutorials/nlp/Entity_Linking_Medical.ipynb @@ -17,7 +17,7 @@ "\"\"\"\n", "\n", "## Install NeMo if using google collab or if its not installed locally\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]" ] }, diff --git a/tutorials/nlp/GLUE_Benchmark.ipynb b/tutorials/nlp/GLUE_Benchmark.ipynb index d8fe75940b09..a33a1da9f4b1 100644 --- a/tutorials/nlp/GLUE_Benchmark.ipynb +++ b/tutorials/nlp/GLUE_Benchmark.ipynb @@ -44,7 +44,7 @@ "# If you're using Google Colab and not running locally, run this cell\n", "\n", "# install NeMo\n", - "BRANCH = 'main'\n!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]\n" + "BRANCH = 'r1.9.0'\n!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]\n" ], "execution_count": null, "outputs": [] diff --git 
a/tutorials/nlp/Joint_Intent_and_Slot_Classification.ipynb b/tutorials/nlp/Joint_Intent_and_Slot_Classification.ipynb index 8c69198565cc..e22efa3f0dc4 100644 --- a/tutorials/nlp/Joint_Intent_and_Slot_Classification.ipynb +++ b/tutorials/nlp/Joint_Intent_and_Slot_Classification.ipynb @@ -22,7 +22,7 @@ "# If you're using Google Colab and not running locally, run this cell\n", "\n", "# install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]" ] }, diff --git a/tutorials/nlp/MegatronBert_export.ipynb b/tutorials/nlp/MegatronBert_export.ipynb index dcd434ba8c1b..596ae16811c7 100644 --- a/tutorials/nlp/MegatronBert_export.ipynb +++ b/tutorials/nlp/MegatronBert_export.ipynb @@ -7,7 +7,7 @@ "metadata": {}, "outputs": [], "source": [ - "BRANCH='main'" + "BRANCH='r1.9.0'" ] }, { @@ -271,4 +271,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} +} \ No newline at end of file diff --git a/tutorials/nlp/Megatron_Synthetic_Tabular_Data_Generation.ipynb b/tutorials/nlp/Megatron_Synthetic_Tabular_Data_Generation.ipynb index 71ece58c7da0..001f248f5656 100644 --- a/tutorials/nlp/Megatron_Synthetic_Tabular_Data_Generation.ipynb +++ b/tutorials/nlp/Megatron_Synthetic_Tabular_Data_Generation.ipynb @@ -62,7 +62,7 @@ "metadata": {}, "outputs": [], "source": [ - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "DATA_PATH='.'\n", "TRANSACTIONS=DATA_PATH+'/card_transaction.v1.csv'\n", "#CHECKPOINTS='/chk_points'\n", diff --git a/tutorials/nlp/Non_English_Downstream_Tasks_(NER).ipynb b/tutorials/nlp/Non_English_Downstream_Tasks_(NER).ipynb index 809c81558947..a61a893e2dd8 100755 --- a/tutorials/nlp/Non_English_Downstream_Tasks_(NER).ipynb +++ b/tutorials/nlp/Non_English_Downstream_Tasks_(NER).ipynb @@ -8,7 +8,7 @@ }, "outputs": [], "source": [ - "BRANCH = 'main'" + "BRANCH = 'r1.9.0'" ] }, { diff --git a/tutorials/nlp/PTune_multiple_NLP_tasks.ipynb b/tutorials/nlp/PTune_multiple_NLP_tasks.ipynb index 78ed43dcae22..de314abf2aad 100644 --- a/tutorials/nlp/PTune_multiple_NLP_tasks.ipynb +++ b/tutorials/nlp/PTune_multiple_NLP_tasks.ipynb @@ -7,7 +7,7 @@ "metadata": {}, "outputs": [], "source": [ - "BRANCH='main'" + "BRANCH='r1.9.0'" ] }, { diff --git a/tutorials/nlp/Punctuation_and_Capitalization.ipynb b/tutorials/nlp/Punctuation_and_Capitalization.ipynb index cadd50840c3d..8b4b91eff699 100644 --- a/tutorials/nlp/Punctuation_and_Capitalization.ipynb +++ b/tutorials/nlp/Punctuation_and_Capitalization.ipynb @@ -6,7 +6,7 @@ "metadata": {}, "outputs": [], "source": [ - "BRANCH = 'main'" + "BRANCH = 'r1.9.0'" ] }, { diff --git a/tutorials/nlp/Question_Answering_Squad.ipynb b/tutorials/nlp/Question_Answering_Squad.ipynb index d78dc8de7602..d966782d412c 100755 --- a/tutorials/nlp/Question_Answering_Squad.ipynb +++ b/tutorials/nlp/Question_Answering_Squad.ipynb @@ -46,7 +46,7 @@ "id": "uRLPr0TnIAHO" }, "source": [ - "BRANCH = 'main'" + "BRANCH = 'r1.9.0'" ], "execution_count": null, "outputs": [] diff --git a/tutorials/nlp/Relation_Extraction-BioMegatron.ipynb b/tutorials/nlp/Relation_Extraction-BioMegatron.ipynb index b7c25cb416ef..b62ba9d5dd86 100644 --- a/tutorials/nlp/Relation_Extraction-BioMegatron.ipynb +++ b/tutorials/nlp/Relation_Extraction-BioMegatron.ipynb @@ -6,7 +6,7 @@ "metadata": {}, "outputs": [], "source": [ - "BRANCH = 'main'" + "BRANCH = 'r1.9.0'" ] }, { diff --git a/tutorials/nlp/Text2Sparql.ipynb b/tutorials/nlp/Text2Sparql.ipynb index b734e72c1fc6..604a12738e03 100644 --- a/tutorials/nlp/Text2Sparql.ipynb +++ 
b/tutorials/nlp/Text2Sparql.ipynb @@ -20,7 +20,7 @@ "# If you're using Google Colab and not running locally, run this cell\n", "\n", "# install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]" ] }, @@ -149,7 +149,7 @@ "WORK_DIR = \"PATH_TO_CHECKPOINTS_AND_LOGS\"\n", "\n", "# NeMo Version\n", - "BRANCH = 'main'\n" + "BRANCH = 'r1.9.0'\n" ] }, { diff --git a/tutorials/nlp/Text_Classification_Sentiment_Analysis.ipynb b/tutorials/nlp/Text_Classification_Sentiment_Analysis.ipynb index 5b5b74e7bf11..9fad5e572dbf 100644 --- a/tutorials/nlp/Text_Classification_Sentiment_Analysis.ipynb +++ b/tutorials/nlp/Text_Classification_Sentiment_Analysis.ipynb @@ -20,7 +20,7 @@ "# If you're using Google Colab and not running locally, run this cell\n", "\n", "# install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]\n", "\n" ] diff --git a/tutorials/nlp/Token_Classification-BioMegatron.ipynb b/tutorials/nlp/Token_Classification-BioMegatron.ipynb index b07dfb061625..422c8cb2d786 100644 --- a/tutorials/nlp/Token_Classification-BioMegatron.ipynb +++ b/tutorials/nlp/Token_Classification-BioMegatron.ipynb @@ -7,7 +7,7 @@ "metadata": {}, "outputs": [], "source": [ - "BRANCH='main'" + "BRANCH='r1.9.0'" ] }, { diff --git a/tutorials/nlp/Token_Classification_Named_Entity_Recognition.ipynb b/tutorials/nlp/Token_Classification_Named_Entity_Recognition.ipynb index 0e8fadde8041..00ff60afda69 100644 --- a/tutorials/nlp/Token_Classification_Named_Entity_Recognition.ipynb +++ b/tutorials/nlp/Token_Classification_Named_Entity_Recognition.ipynb @@ -30,7 +30,7 @@ "metadata": {}, "outputs": [], "source": [ - "BRANCH = 'main'" + "BRANCH = 'r1.9.0'" ] }, { @@ -53,7 +53,7 @@ "# If you're using Google Colab and not running locally, run this cell\n", "\n", "# install NeMo\n", - "BRANCH = 'main'\n!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]\n" + "BRANCH = 'r1.9.0'\n!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]\n" ], "execution_count": null, "outputs": [] diff --git a/tutorials/nlp/Zero_Shot_Intent_Recognition.ipynb b/tutorials/nlp/Zero_Shot_Intent_Recognition.ipynb index 69df7b27b02d..beb434dd12ab 100644 --- a/tutorials/nlp/Zero_Shot_Intent_Recognition.ipynb +++ b/tutorials/nlp/Zero_Shot_Intent_Recognition.ipynb @@ -22,7 +22,7 @@ "# If you're using Google Colab and not running locally, run this cell\n", "\n", "# install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]" ] }, diff --git a/tutorials/speaker_tasks/ASR_with_SpeakerDiarization.ipynb b/tutorials/speaker_tasks/ASR_with_SpeakerDiarization.ipynb index abffa3e7bb44..76dbf7bd12e1 100644 --- a/tutorials/speaker_tasks/ASR_with_SpeakerDiarization.ipynb +++ b/tutorials/speaker_tasks/ASR_with_SpeakerDiarization.ipynb @@ -30,7 +30,7 @@ "!pip install unidecode\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n", "\n", "## Install TorchAudio\n", diff --git a/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb b/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb index dc331a7ec0ff..d3671f5ff776 100644 --- 
a/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb +++ b/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb @@ -23,7 +23,7 @@ "!pip install unidecode\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n", "\n", "## Install TorchAudio\n", diff --git a/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb b/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb index c98a3894237e..5e5b5c9fd4ba 100644 --- a/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb +++ b/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb @@ -27,7 +27,7 @@ "!pip install unidecode\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n", "\n", "# Install TorchAudio\n", diff --git a/tutorials/text_processing/Inverse_Text_Normalization.ipynb b/tutorials/text_processing/Inverse_Text_Normalization.ipynb index aad946ebe357..6883ddbeb95e 100755 --- a/tutorials/text_processing/Inverse_Text_Normalization.ipynb +++ b/tutorials/text_processing/Inverse_Text_Normalization.ipynb @@ -56,7 +56,7 @@ "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", "\"\"\"\n", "\n", - "BRANCH = 'main'" + "BRANCH = 'r1.9.0'" ], "id": "YxVLI-f97Kxl", "execution_count": null, diff --git a/tutorials/text_processing/Text_Normalization.ipynb b/tutorials/text_processing/Text_Normalization.ipynb index 76a65267f180..e3273ad1738c 100755 --- a/tutorials/text_processing/Text_Normalization.ipynb +++ b/tutorials/text_processing/Text_Normalization.ipynb @@ -64,7 +64,7 @@ "# If you're using Google Colab and not running locally, run this cell\n", "\n", "# install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "if 'google.colab' in str(get_ipython()):\n", " !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]" ], diff --git a/tutorials/text_processing/WFST_Tutorial.ipynb b/tutorials/text_processing/WFST_Tutorial.ipynb index 294cb497d101..f714c6a6be54 100644 --- a/tutorials/text_processing/WFST_Tutorial.ipynb +++ b/tutorials/text_processing/WFST_Tutorial.ipynb @@ -14,7 +14,7 @@ "source": [ "### WARNING: This notebook will not work in a Colab environment. \n", "\n", - "BRANCH= 'main'\n", + "BRANCH= 'r1.9.0'\n", "\n", "!git clone -b $BRANCH https://github.com/NVIDIA/NeMo\n", "%cd NeMo\n", diff --git a/tutorials/tools/CTC_Segmentation_Tutorial.ipynb b/tutorials/tools/CTC_Segmentation_Tutorial.ipynb index d22258885db8..f18f98002c5f 100644 --- a/tutorials/tools/CTC_Segmentation_Tutorial.ipynb +++ b/tutorials/tools/CTC_Segmentation_Tutorial.ipynb @@ -35,7 +35,7 @@ "id": "d4KCUoxSpdoZ" }, "source": [ - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "\n", "\"\"\"\n", "You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.\n", diff --git a/tutorials/tts/FastPitch_Finetuning.ipynb b/tutorials/tts/FastPitch_Finetuning.ipynb index 770eb9f68848..6e4ee72bc5b8 100755 --- a/tutorials/tts/FastPitch_Finetuning.ipynb +++ b/tutorials/tts/FastPitch_Finetuning.ipynb @@ -57,7 +57,7 @@ "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", "4. 
Run this cell to set up dependencies.\n", "\"\"\"\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "# # If you're using Google Colab and not running locally, uncomment and run this cell.\n", "# !apt-get install sox libsndfile1 ffmpeg\n", "# !pip install wget unidecode pynini==2.1.4\n", @@ -715,4 +715,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} +} \ No newline at end of file diff --git a/tutorials/tts/FastPitch_MixerTTS_Training.ipynb b/tutorials/tts/FastPitch_MixerTTS_Training.ipynb index 43ee908e1db2..12a22765ced7 100644 --- a/tutorials/tts/FastPitch_MixerTTS_Training.ipynb +++ b/tutorials/tts/FastPitch_MixerTTS_Training.ipynb @@ -50,7 +50,7 @@ "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", "4. Run this cell to set up dependencies# .\n", "\"\"\"\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "# # If you're using Colab and not running locally, uncomment and run this cell.\n", "# !apt-get install sox libsndfile1 ffmpeg\n", "# !pip install wget unidecode pynini==2.1.4 scipy==1.7.3\n", @@ -606,4 +606,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} +} \ No newline at end of file diff --git a/tutorials/tts/Inference_DurationPitchControl.ipynb b/tutorials/tts/Inference_DurationPitchControl.ipynb index b7918f026171..7b942e349207 100644 --- a/tutorials/tts/Inference_DurationPitchControl.ipynb +++ b/tutorials/tts/Inference_DurationPitchControl.ipynb @@ -46,7 +46,7 @@ "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", "4. Run this cell to set up dependencies.\n", "\"\"\"\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "# # If you're using Google Colab and not running locally, uncomment and run this cell.\n", "# !apt-get install sox libsndfile1 ffmpeg\n", "# !pip install wget unidecode\n", diff --git a/tutorials/tts/Inference_ModelSelect.ipynb b/tutorials/tts/Inference_ModelSelect.ipynb index c0ebc64a313e..4f45cd704d7b 100644 --- a/tutorials/tts/Inference_ModelSelect.ipynb +++ b/tutorials/tts/Inference_ModelSelect.ipynb @@ -46,7 +46,7 @@ "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", "4. Run this cell to set up dependencies.\n", "\"\"\"\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "# # If you're using Google Colab and not running locally, uncomment and run this cell.\n", "# !apt-get install sox libsndfile1 ffmpeg\n", "# !pip install wget unidecode\n", diff --git a/tutorials/tts/Tacotron2_Training.ipynb b/tutorials/tts/Tacotron2_Training.ipynb index d87482350b75..8109b734af60 100644 --- a/tutorials/tts/Tacotron2_Training.ipynb +++ b/tutorials/tts/Tacotron2_Training.ipynb @@ -54,7 +54,7 @@ "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", "4. Run this cell to set up dependencies# .\n", "\"\"\"\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "# # If you're using Colab and not running locally, uncomment and run this cell.\n", "# !apt-get install sox libsndfile1 ffmpeg\n", "# !pip install wget unidecode\n", diff --git a/tutorials/tts/TalkNet_Training.ipynb b/tutorials/tts/TalkNet_Training.ipynb index 6d69733817c3..a0df2e585d07 100644 --- a/tutorials/tts/TalkNet_Training.ipynb +++ b/tutorials/tts/TalkNet_Training.ipynb @@ -50,7 +50,7 @@ "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", "4. 
Run this cell to set up dependencies# .\n", "\"\"\"\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "# # If you're using Colab and not running locally, uncomment and run this cell.\n", "# !apt-get install sox libsndfile1 ffmpeg\n", "# !pip install wget unidecode pysptk\n", From a775320a458ec3743803b4826828d00a8b41831c Mon Sep 17 00:00:00 2001 From: ericharper Date: Sun, 1 May 2022 23:38:20 -0600 Subject: [PATCH 091/244] update package info Signed-off-by: ericharper --- nemo/package_info.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nemo/package_info.py b/nemo/package_info.py index cbb6f2141661..42635cb0005f 100644 --- a/nemo/package_info.py +++ b/nemo/package_info.py @@ -16,7 +16,7 @@ MAJOR = 1 MINOR = 9 PATCH = 0 -PRE_RELEASE = 'rc0' +PRE_RELEASE = '' # Use the following formatting: (major, minor, patch, pre-release) VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE) From e944e13ea6b884bc37c17f9360aa6a5eb4255f1a Mon Sep 17 00:00:00 2001 From: treacker Date: Mon, 2 May 2022 04:53:21 -0700 Subject: [PATCH 092/244] new exp --- examples/tts/conf/vits.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index 575050d80e89..e62da06af1df 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -122,11 +122,11 @@ model: # drop_last: false # shuffle: true # batch_size: 32 - num_workers: 4 + num_workers: 8 pin_memory: false batch_sampler: - batch_size: 32 + batch_size: 96 boundaries: [32,300,400,500,600,700,800,900,1000] num_replicas: ${trainer.devices} shuffle: true @@ -200,7 +200,7 @@ trainer: devices: 2 accelerator: gpu strategy: ddp - precision: 32 + precision: 16 max_epochs: 1000000 accumulate_grad_batches: 1 # gradient_clip_val: 1000.0 @@ -211,7 +211,7 @@ trainer: check_val_every_n_epoch: 1 exp_manager: - exp_dir: ../exps/vits_fp32 + exp_dir: ../exps/vits_bs96 name: ${name} create_tensorboard_logger: false create_checkpoint_callback: true @@ -220,7 +220,7 @@ exp_manager: mode: min create_wandb_logger: true wandb_logger_kwargs: - name: vits_fp32 + name: vits_bs96 project: ${name} entity: nvidia resume_if_exists: false From d6883e3c71c5a62d71dfbe93855b97a37a200fcb Mon Sep 17 00:00:00 2001 From: ericharper Date: Wed, 4 May 2022 12:07:00 -0600 Subject: [PATCH 093/244] update branch Signed-off-by: ericharper --- tutorials/nlp/Joint_Intent_and_Slot_Classification.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tutorials/nlp/Joint_Intent_and_Slot_Classification.ipynb b/tutorials/nlp/Joint_Intent_and_Slot_Classification.ipynb index 99fc99358fd9..0474afa1c2a3 100644 --- a/tutorials/nlp/Joint_Intent_and_Slot_Classification.ipynb +++ b/tutorials/nlp/Joint_Intent_and_Slot_Classification.ipynb @@ -22,7 +22,7 @@ "# If you're using Google Colab and not running locally, run this cell\n", "\n", "# install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.9.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]" ] }, From 0623b0135543927dbe61b27c4d8e591679b13931 Mon Sep 17 00:00:00 2001 From: Boris Fomitchev Date: Wed, 4 May 2022 14:22:45 -0700 Subject: [PATCH 094/244] Restored tests previously disabled for 22.03 base (#4109) Signed-off-by: Boris Fomitchev --- nemo/collections/tts/modules/waveglow.py | 20 ++++++++++++------- tests/collections/nlp/test_huggingface.py | 6 ++---- tests/collections/nlp/test_nlp_exportables.py | 2 -- tests/collections/tts/test_tts_exportables.py | 8 +------- 
tests/collections/tts/test_waveglow.py | 1 - 5 files changed, 16 insertions(+), 21 deletions(-) diff --git a/nemo/collections/tts/modules/waveglow.py b/nemo/collections/tts/modules/waveglow.py index b9c9aacc05df..daa5405298db 100644 --- a/nemo/collections/tts/modules/waveglow.py +++ b/nemo/collections/tts/modules/waveglow.py @@ -131,13 +131,19 @@ def forward(self, spec, z=None, audio=None, run_inverse=True, sigma=1.0): @property def input_types(self): - return { - "spec": NeuralType(('B', 'D', 'T'), MelSpectrogramType()), - "z": NeuralType(('B', 'D', 'T'), MelSpectrogramType()), - "audio": NeuralType(('B', 'T'), AudioSignal(), optional=True), - "run_inverse": NeuralType(elements_type=IntType(), optional=True), - "sigma": NeuralType(optional=True), - } + if self.mode == OperationMode.infer: + return { + "spec": NeuralType(('B', 'D', 'T'), MelSpectrogramType()), + "z": NeuralType(('B', 'D', 'T'), MelSpectrogramType()), + } + else: + return { + "spec": NeuralType(('B', 'D', 'T'), MelSpectrogramType()), + "z": NeuralType(('B', 'D', 'T'), MelSpectrogramType()), + "audio": NeuralType(('B', 'T'), AudioSignal(), optional=True), + "run_inverse": NeuralType(elements_type=IntType(), optional=True), + "sigma": NeuralType(optional=True), + } @property def output_types(self): diff --git a/tests/collections/nlp/test_huggingface.py b/tests/collections/nlp/test_huggingface.py index 68b742470d82..cfe2845caa9b 100644 --- a/tests/collections/nlp/test_huggingface.py +++ b/tests/collections/nlp/test_huggingface.py @@ -49,8 +49,7 @@ def test_get_pretrained_bert_model(self): self.omega_conf.language_model.pretrained_model_name = 'bert-base-uncased' model = nemo_nlp.modules.get_lm_model(cfg=self.omega_conf) assert isinstance(model, nemo_nlp.modules.BertEncoder) - # TODO: Fix - # do_export(model, "bert-base-uncased") + do_export(model, "bert-base-uncased") @pytest.mark.with_downloads() @pytest.mark.unit @@ -74,8 +73,7 @@ def test_get_pretrained_albert_model(self): self.omega_conf.language_model.pretrained_model_name = 'albert-base-v1' model = nemo_nlp.modules.get_lm_model(cfg=self.omega_conf) assert isinstance(model, nemo_nlp.modules.AlbertEncoder) - # TODO: fix - # do_export(model, "albert-base-v1") + do_export(model, "albert-base-v1") @pytest.mark.with_downloads() @pytest.mark.unit diff --git a/tests/collections/nlp/test_nlp_exportables.py b/tests/collections/nlp/test_nlp_exportables.py index e0043cdced7b..21f65ec5d94b 100644 --- a/tests/collections/nlp/test_nlp_exportables.py +++ b/tests/collections/nlp/test_nlp_exportables.py @@ -99,7 +99,6 @@ def test_IntentSlotClassificationModel_export_to_onnx(self, dummy_data): assert onnx_model.graph.output[0].name == 'intent_logits' assert onnx_model.graph.output[1].name == 'slot_logits' - @pytest.mark.pleasefixme @pytest.mark.with_downloads() @pytest.mark.run_only_on('GPU') @pytest.mark.unit @@ -130,7 +129,6 @@ def test_PunctuationCapitalizationModel_export_to_onnx(self): assert onnx_model.graph.output[0].name == 'punct_logits' assert onnx_model.graph.output[1].name == 'capit_logits' - @pytest.mark.pleasefixme @pytest.mark.with_downloads() @pytest.mark.run_only_on('GPU') @pytest.mark.unit diff --git a/tests/collections/tts/test_tts_exportables.py b/tests/collections/tts/test_tts_exportables.py index 40318173bd62..d847c3cf95e0 100644 --- a/tests/collections/tts/test_tts_exportables.py +++ b/tests/collections/tts/test_tts_exportables.py @@ -23,23 +23,17 @@ @pytest.fixture() def fastpitch_model(): - test_root = os.path.dirname(os.path.abspath(__file__)) - conf = 
OmegaConf.load(os.path.join(test_root, '../../../examples/tts/conf/fastpitch_align_v1.05.yaml')) - conf.train_dataset = conf.validation_datasets = '.' - conf.model.train_ds = conf.model.test_ds = conf.model.validation_ds = None - model = FastPitchModel(cfg=conf.model) + model = FastPitchModel.from_pretrained(model_name="tts_en_fastpitch") return model @pytest.fixture() def hifigan_model(): - test_root = os.path.dirname(os.path.abspath(__file__)) model = HifiGanModel.from_pretrained(model_name="tts_hifigan") return model class TestExportable: - @pytest.mark.pleasefixme @pytest.mark.run_only_on('GPU') @pytest.mark.unit def test_FastPitchModel_export_to_onnx(self, fastpitch_model): diff --git a/tests/collections/tts/test_waveglow.py b/tests/collections/tts/test_waveglow.py index 0d2388b9a124..9198f01c4226 100644 --- a/tests/collections/tts/test_waveglow.py +++ b/tests/collections/tts/test_waveglow.py @@ -73,7 +73,6 @@ def forward_wrapper(self, spec, z=None): class TestWaveGlow: - @pytest.mark.pleasefixme @pytest.mark.run_only_on('GPU') @pytest.mark.unit def test_export_to_onnx(self): From c10fed40898809dca61ebf6c65318f87163ba715 Mon Sep 17 00:00:00 2001 From: Nithin Rao Date: Wed, 4 May 2022 19:27:43 -0700 Subject: [PATCH 095/244] add augmentation to label models (#4113) * add augmentation to label models Signed-off-by: nithinraok * duration fix Signed-off-by: nithinraok --- nemo/collections/asr/models/label_models.py | 8 ++++++++ scripts/speaker_tasks/scp_to_manifest.py | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/nemo/collections/asr/models/label_models.py b/nemo/collections/asr/models/label_models.py index 08f1bca7ae08..83f0572c6afd 100644 --- a/nemo/collections/asr/models/label_models.py +++ b/nemo/collections/asr/models/label_models.py @@ -107,6 +107,10 @@ def __init__(self, cfg: DictConfig, trainer: Trainer = None): self.task = None self._accuracy = TopKClassificationAccuracy(top_k=[1]) self.labels = None + if hasattr(self._cfg, 'spec_augment') and self._cfg.spec_augment is not None: + self.spec_augmentation = EncDecSpeakerLabelModel.from_config_dict(self._cfg.spec_augment) + else: + self.spec_augmentation = None @staticmethod def extract_labels(data_layer_config): @@ -258,6 +262,10 @@ def forward(self, input_signal, input_signal_length): processed_signal, processed_signal_len = self.preprocessor( input_signal=input_signal, length=input_signal_length, ) + + if self.spec_augmentation is not None and self.training: + processed_signal = self.spec_augmentation(input_spec=processed_signal, length=processed_signal_len) + encoded, length = self.encoder(audio_signal=processed_signal, length=processed_signal_len) logits, embs = self.decoder(encoder_output=encoded, length=length) return logits, embs diff --git a/scripts/speaker_tasks/scp_to_manifest.py b/scripts/speaker_tasks/scp_to_manifest.py index 58041cec7891..c146b13e1742 100644 --- a/scripts/speaker_tasks/scp_to_manifest.py +++ b/scripts/speaker_tasks/scp_to_manifest.py @@ -132,7 +132,7 @@ def read_manifest(manifest): def get_duration(json_line): dur = json_line['duration'] - if dur is not None: + if dur is None: wav_path = json_line['audio_filepath'] json_line['duration'] = sox.file_info.duration(wav_path) return json_line From d4f6d7571ced81393e945ba22b5d9de3af512501 Mon Sep 17 00:00:00 2001 From: Ramanathan Arunachalam Date: Thu, 5 May 2022 01:38:17 -0700 Subject: [PATCH 096/244] Call register_bert_model after assigning self.bert_model variable (#4116) Signed-off-by: Ramanathan Arunachalam Co-authored-by: 
Ramanathan Arunachalam --- nemo/collections/nlp/models/nlp_model.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/nemo/collections/nlp/models/nlp_model.py b/nemo/collections/nlp/models/nlp_model.py index 927b7924181b..54ac0a266638 100644 --- a/nemo/collections/nlp/models/nlp_model.py +++ b/nemo/collections/nlp/models/nlp_model.py @@ -119,9 +119,6 @@ def __init__(self, cfg: DictConfig, trainer: Trainer = None, no_lm_init=False): # Required to pull up the config for MegatronBert models self.pretrained_model_name = cfg.language_model.pretrained_model_name - # register encoder config - self.register_bert_model() - if ( cfg.tokenizer is not None and cfg.tokenizer.get("tokenizer_name", "") is not None @@ -133,6 +130,8 @@ def __init__(self, cfg: DictConfig, trainer: Trainer = None, no_lm_init=False): if cfg.get('language_model') and not no_lm_init: self.bert_model = bert_model + # register encoder config + self.register_bert_model() def register_artifact( self, config_path: str, src: str, verify_src_exists: bool = False, From f3df3434f3985791b8547b558be86b4c5a7af725 Mon Sep 17 00:00:00 2001 From: bene-ges <61418381+bene-ges@users.noreply.github.com> Date: Fri, 6 May 2022 01:02:09 +0300 Subject: [PATCH 097/244] Tutorial on ITN with Thutmose tagger and small fixes (#4117) * 1. Add tutorial. 2. Move a function to fix import in tutorial. 3. Merge multiple spaces into one space in the final output Signed-off-by: Alexandra Antonova * fixes for code review Signed-off-by: Alexandra Antonova * Add tutorial to tutorials.rst Signed-off-by: Alexandra Antonova Co-authored-by: Alexandra Antonova --- docs/source/starthere/tutorials.rst | 3 + .../prepare_corpora_after_alignment.py | 3 +- .../prepare_corpora_for_alignment.py | 2 +- .../dataset_preparation/utils.py | 207 ---- .../install_requirements.sh | 6 + .../text_normalization_as_tagging/tagging.py | 5 +- .../text_normalization_as_tagging/utils.py | 189 ++- .../ITN_with_Thutmose_Tagger.ipynb | 1047 +++++++++++++++++ .../thutmose_tagger_alignment_bottom.png | Bin 0 -> 18489 bytes .../images/thutmose_tagger_alignment_top.png | Bin 0 -> 13009 bytes .../images/thutmose_tagger_architecture.png | Bin 0 -> 47623 bytes .../thutmose_tagger_final_alignment.png | Bin 0 -> 18211 bytes .../images/thutmose_tagger_tag_vocabulary.png | Bin 0 -> 6134 bytes 13 files changed, 1249 insertions(+), 213 deletions(-) delete mode 100644 examples/nlp/text_normalization_as_tagging/dataset_preparation/utils.py create mode 100644 examples/nlp/text_normalization_as_tagging/install_requirements.sh create mode 100644 tutorials/text_processing/ITN_with_Thutmose_Tagger.ipynb create mode 100644 tutorials/text_processing/images/thutmose_tagger_alignment_bottom.png create mode 100644 tutorials/text_processing/images/thutmose_tagger_alignment_top.png create mode 100644 tutorials/text_processing/images/thutmose_tagger_architecture.png create mode 100644 tutorials/text_processing/images/thutmose_tagger_final_alignment.png create mode 100644 tutorials/text_processing/images/thutmose_tagger_tag_vocabulary.png diff --git a/docs/source/starthere/tutorials.rst b/docs/source/starthere/tutorials.rst index eb3cab1ee6bc..a526b28130e5 100644 --- a/docs/source/starthere/tutorials.rst +++ b/docs/source/starthere/tutorials.rst @@ -151,6 +151,9 @@ To run a tutorial: * - Text Processing - Inverse Text Normalization for ASR - `Inverse Text Normalization `_ + * - Text Processing + - Inverse Text Normalization for ASR - Thutmose Tagger + - `Inverse Text Normalization with Thutmose Tagger `_ 
* - Text Processing - Constructing Normalization Grammars with WFSTs - `WFST Tutorial `_ diff --git a/examples/nlp/text_normalization_as_tagging/dataset_preparation/prepare_corpora_after_alignment.py b/examples/nlp/text_normalization_as_tagging/dataset_preparation/prepare_corpora_after_alignment.py index 28caed27e533..33608b529c70 100644 --- a/examples/nlp/text_normalization_as_tagging/dataset_preparation/prepare_corpora_after_alignment.py +++ b/examples/nlp/text_normalization_as_tagging/dataset_preparation/prepare_corpora_after_alignment.py @@ -24,8 +24,7 @@ from collections import Counter from typing import Dict, Optional, TextIO, Tuple -from examples.nlp.text_normalization_as_tagging.dataset_preparation.utils import get_src_and_dst_for_alignment - +from nemo.collections.nlp.data.text_normalization_as_tagging.utils import get_src_and_dst_for_alignment from nemo.utils import logging parser = ArgumentParser(description="Produce data for the ThutmoseTaggerModel") diff --git a/examples/nlp/text_normalization_as_tagging/dataset_preparation/prepare_corpora_for_alignment.py b/examples/nlp/text_normalization_as_tagging/dataset_preparation/prepare_corpora_for_alignment.py index ca298f9ce5d9..9fe64c1105b8 100644 --- a/examples/nlp/text_normalization_as_tagging/dataset_preparation/prepare_corpora_for_alignment.py +++ b/examples/nlp/text_normalization_as_tagging/dataset_preparation/prepare_corpora_for_alignment.py @@ -47,7 +47,7 @@ from os.path import isdir, join from shutil import rmtree -from examples.nlp.text_normalization_as_tagging.dataset_preparation.utils import get_src_and_dst_for_alignment +from nemo.collections.nlp.data.text_normalization_as_tagging.utils import get_src_and_dst_for_alignment parser = ArgumentParser(description='Split corpus to subcorpora for giza alignment') parser.add_argument('--data_dir', type=str, required=True, help='Path to folder with data') diff --git a/examples/nlp/text_normalization_as_tagging/dataset_preparation/utils.py b/examples/nlp/text_normalization_as_tagging/dataset_preparation/utils.py deleted file mode 100644 index 906c1903024e..000000000000 --- a/examples/nlp/text_normalization_as_tagging/dataset_preparation/utils.py +++ /dev/null @@ -1,207 +0,0 @@ -# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -import re -from typing import Tuple - -from nemo.collections.nlp.data.text_normalization_as_tagging.utils import split_text_by_isalpha, spoken_preprocessing - -"""Utility functions for Thutmose Tagger data preparation.""" - - -def get_src_and_dst_for_alignment( - semiotic_class: str, written: str, spoken: str, lang: str -) -> Tuple[str, str, str, str]: - """Tokenize written and spoken span. - Args: - semiotic_class: str - lowercase semiotic class, ex. "cardinal" - written: str - written form, ex. "2015 году" - spoken: str - spoken form, ex. 
"две тысячи пятнадцатом году" - lang: str - language - Return: - src: str - written part, where digits and foreign letters are tokenized by characters, ex. "2 0 1 5" - dst: str - spoken part tokenized by space, ex. "две тысячи пятнадцатом" - same_begin: str - same_end: str - """ - written = written.casefold() - # ATTENTION!!! This is INPUT transformation! Need to do the same at inference time! - spoken = spoken_preprocessing(spoken) - - # remove same fragments at the beginning or at the end of spoken and written form - written_parts = written.split() - spoken_parts = spoken.split() - same_from_begin = 0 - same_from_end = 0 - for i in range(min(len(written_parts), len(spoken_parts))): - if written_parts[i] == spoken_parts[i]: - same_from_begin += 1 - else: - break - for i in range(min(len(written_parts), len(spoken_parts))): - if written_parts[-i - 1] == spoken_parts[-i - 1]: - same_from_end += 1 - else: - break - same_begin = written_parts[0:same_from_begin] - same_end = [] - if same_from_end == 0: - written = " ".join(written_parts[same_from_begin:]) - spoken = " ".join(spoken_parts[same_from_begin:]) - else: - written = " ".join(written_parts[same_from_begin:-same_from_end]) - spoken = " ".join(spoken_parts[same_from_begin:-same_from_end]) - same_end = written_parts[-same_from_end:] - - fragments = list(split_text_by_isalpha(written)) - written_tokens = [] - for frag in fragments: - if frag.isalpha(): - if semiotic_class == "plain" or semiotic_class == "letters" or semiotic_class == "electronic": - chars = list(frag.strip()) - chars[0] = "_" + chars[0] # prepend first symbol of a word with underscore - chars[-1] = chars[-1] + "_" # append underscore to the last symbol - written_tokens += chars - else: - written_tokens.append("_" + frag + "_") - else: - chars = list(frag.strip().replace(" ", "")) - if len(chars) > 0: - chars[0] = "_" + chars[0] # prepend first symbol of a non-alpha fragment with underscore - chars[-1] = chars[-1] + "_" # append underscore to the last symbol of a non-alpha fragment - written_tokens += chars - written_str = " ".join(written_tokens) - - # _н_ _._ _г_ _._ => _н._ _г._ - written_str = re.sub( - r"([abcdefghijklmnopqrstuvwxyzабвгдеёжзийклмнопрстуфхцчшщъыьэюя])_ _\._", r"\g<1>._", written_str - ) - # _тыс_ _. $ => _тыс._ _$ - written_str = re.sub( - r"([abcdefghijklmnopqrstuvwxyzабвгдеёжзийклмнопрстуфхцчшщъыьэюя])_ _\. 
([^_])]", r"\g<1>._ _\g<2>", written_str - ) - - if semiotic_class == "ordinal": - # _8 2 -_ _ом_ => _8 2-ом_ - written_str = re.sub( - r"([\d]) -_ _([abcdefghijklmnopqrstuvwxyzабвгдеёжзийклмнопрстуфхцчшщъыьэюя]+)_", - r"\g<1>-\g<2>_", - written_str, - ) - # _8 8_ _й_ _8 8й_ - written_str = re.sub( - r"([\d])_ _([abcdefghijklmnopqrstuvwxyzабвгдеёжзийклмнопрстуфхцчшщъыьэюя]+)_", r"\g<1>\g<2>_", written_str - ) - - if semiotic_class == "cardinal": - # _2 5 -_ _ти_ => _2 5-ти_ - written_str = re.sub(r"([\d]) -_ _(ти)_", r"\g<1>-\g<2>_", written_str) - written_str = re.sub(r"([\d]) -_ _(и)_", r"\g<1>-\g<2>_", written_str) - written_str = re.sub(r"([\d]) -_ _(мя)_", r"\g<1>-\g<2>_", written_str) - written_str = re.sub(r"([\d]) -_ _(ех)_", r"\g<1>-\g<2>_", written_str) - - # _i b m_ _'_ _s_ => _i b m's_ - if lang == "en": - written_str = re.sub(r"_ _'_ _s_", r"'s_", written_str) - - if semiotic_class == "date" and lang == "en": - # _1 9 8 0_ _s_ => _1 9 8 0s_ - written_str = re.sub(r"([\d])_ _s_", r"\g<1>s_", written_str) - # _1 9 5 0 '_ _s_ => _1 9 5 0's_ - written_str = re.sub(r"([\d]) '_ _s_", r"\g<1>'s_", written_str) - # _wednesday_ _2 6_ _th_ _september_ _2 0 1 2_ => _wednesday_ _2 6th_ _september_ _2 0 1 2_ - written_str = re.sub(r"([\d])_ _th_", r"\g<1>th_", written_str) - # _wednesday_ _may_ _2 1_ _st_ _, 2 0 1 4_ => _wednesday_ _may_ _2 1st_ _, 2 0 1 4_ - written_str = re.sub(r"([\d])_ _st_", r"\g<1>st_", written_str) - # _wednesday_ _2 3_ _rd_ _july_ _2 0 1 4_ => _wednesday_ _2 3rd_ _july_ _2 0 1 4_ - written_str = re.sub(r"([\d])_ _rd_", r"\g<1>rd_", written_str) - # _wednesday_ _2 2_ _nd_ _july_ _2 0 1 4_ => _wednesday_ _2 2nd_ _july_ _2 0 1 4_ - written_str = re.sub(r"([\d])_ _nd_", r"\g<1>nd_", written_str) - - written_str = re.sub(r"_mon_ _\. ", r"_mon._ ", written_str) - written_str = re.sub(r"_tue_ _\. ", r"_tue._ ", written_str) - written_str = re.sub(r"_wen_ _\. ", r"_wen._ ", written_str) - written_str = re.sub(r"_thu_ _\. ", r"_thu._ ", written_str) - written_str = re.sub(r"_fri_ _\. ", r"_fri._ ", written_str) - written_str = re.sub(r"_sat_ _\. ", r"_sat._ ", written_str) - written_str = re.sub(r"_sun_ _\. ", r"_sun._ ", written_str) - - written_str = re.sub(r"_jan_ _\. ", r"_jan._ ", written_str) - written_str = re.sub(r"_feb_ _\. ", r"_feb._ ", written_str) - written_str = re.sub(r"_mar_ _\. ", r"_mar._ ", written_str) - written_str = re.sub(r"_apr_ _\. ", r"_apr._ ", written_str) - written_str = re.sub(r"_may_ _\. ", r"_may._ ", written_str) - written_str = re.sub(r"_jun_ _\. ", r"_jun._ ", written_str) - written_str = re.sub(r"_jul_ _\. ", r"_jul._ ", written_str) - written_str = re.sub(r"_aug_ _\. ", r"_aug._ ", written_str) - written_str = re.sub(r"_sep_ _\. ", r"_sep._ ", written_str) - written_str = re.sub(r"_oct_ _\. ", r"_oct._ ", written_str) - written_str = re.sub(r"_nov_ _\. ", r"_nov._ ", written_str) - written_str = re.sub(r"_dec_ _\. ", r"_dec._ ", written_str) - - if semiotic_class == "date" and lang == "ru": - # _1 8 . 0 8 . 2 0 0 1_ => _1 8_ .08. _2 0 0 1_ - # _1 8 / 0 8 / 2 0 0 1_ => _1 8_ /08/ _2 0 0 1_ - # _1 8 - 0 8 - 2 0 0 1_ => _1 8_ -08- _2 0 0 1_ - written_str = re.sub(r"([\d]) \. ([01]) ([0123456789]) \. ([\d])", r"\g<1>_ .\g<2>\g<3>. _\g<4>", written_str) - written_str = re.sub(r"([\d]) / ([01]) ([0123456789]) / ([\d])", r"\g<1>_ /\g<2>\g<3>/ _\g<4>", written_str) - written_str = re.sub(r"([\d]) - ([01]) ([0123456789]) - ([\d])", r"\g<1>_ -\g<2>\g<3>- _\g<4>", written_str) - # _1 8 . 8 . 2 0 0 1_ => _1 8_ .8. 
_2 0 0 1_ - # _1 8 / 8 / 2 0 0 1_ => _1 8_ /8/ _2 0 0 1_ - # _1 8 - 8 - 2 0 0 1_ => _1 8_ -8- _2 0 0 1_ - written_str = re.sub(r"([\d]) \. ([123456789]) \. ([\d])", r"\g<1>_ .\g<2>. _\g<3>", written_str) - written_str = re.sub(r"([\d]) / ([123456789]) / ([\d])", r"\g<1>_ /\g<2>/ _\g<3>", written_str) - written_str = re.sub(r"([\d]) - ([123456789]) - ([\d])", r"\g<1>_ -\g<2>- _\g<3>", written_str) - - if semiotic_class == "money": - # if a span start with currency, move it to the end - # "_$ 2 5_" => "_2 5_ _$<<" #<< means "at post-processing move to the beginning of th semiotic span" - written_str = re.sub( - r"^(_[^0123456789abcdefghijklmnopqrstuvwxyzабвгдеёжзийклмнопрстуфхцчшщъыьэюя]) ([\d].*)$", - r"_\g<2> \g<1><<", - written_str, - ) - - # "_us_ _$ 7 0 0_" => "_us__$ 7 0 0_" - written_str = re.sub(r"^_us_ _\$ ([\d].*)$", r"_\g<1> _us__$<<", written_str) - - # "_2 5 $_" => "_2 5_ _$_" #insert space between last digit and dollar sign - written_str = re.sub( - r"([\d]) ([^0123456789abcdefghijklmnopqrstuvwxyzабвгдеёжзийклмнопрстуфхцчшщъыьэюя_]_)", - r"\g<1>_ _\g<2>", - written_str, - ) - - if semiotic_class == "time": - # "_pm_ _1 0_" => "_1 0_ _pm_<<" - written_str = re.sub(r"^(_[ap]m_) (_[\d].*)$", r"\g<2> \g<1><<", written_str) - - # "_8 : 0 0_ _a._ _m._ => _8:00_ _a._ _m._" - # "_1 2 : 0 0_ _a._ _m._ => _1 2:00_ _a._ _m._" - written_str = re.sub(r"(\d) [:.] 0 0_", r"\g<1>:00_", written_str) - - # "_2 : 4 2 : 4 4_" => "_2: 4 2: 4 4_" - written_str = re.sub(r"(\d) [:.] ", r"\g<1>: ", written_str) - - if semiotic_class == "measure": - # "_6 5 8_ _см_ _³ ._" => " _6 5 8_ _³> _см._" - # > means "at post-processing swap with the next token to the right" - written_str = re.sub( - r"(_[abcdefghijklmnopqrstuvwxyzабвгдеёжзийклмнопрстуфхцчшщъыьэюя.]+_) (_[³²]_?)", - r"\g<2>> \g<1>", - written_str, - ) - - return written_str, spoken, " ".join(same_begin), " ".join(same_end) diff --git a/examples/nlp/text_normalization_as_tagging/install_requirements.sh b/examples/nlp/text_normalization_as_tagging/install_requirements.sh new file mode 100644 index 000000000000..f54a6cb3f8fa --- /dev/null +++ b/examples/nlp/text_normalization_as_tagging/install_requirements.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +git clone https://github.com/moses-smt/giza-pp.git giza-pp +cd giza-pp +make +cd .. 
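For orientation, the `get_src_and_dst_for_alignment` helper that this patch relocates (re-added below in `nemo/collections/nlp/data/text_normalization_as_tagging/utils.py`) prepares GIZA++ input by tokenizing the written form: alphabetic fragments stay whole for most semiotic classes, digit and punctuation runs are split into single characters, and underscores mark the start and end of each fragment. A minimal Python sketch of that core convention, leaving out the class-specific regex fix-ups the full function applies afterwards (the function name here is illustrative, not from the patch):

from itertools import groupby

def written_side_tokens(written: str) -> str:
    # Alphabetic runs stay whole; digit/punctuation runs are split into
    # single characters; underscores delimit each fragment for detokenization.
    tokens = []
    for is_alpha, group in groupby(written.casefold(), key=str.isalpha):
        frag = "".join(group)
        if is_alpha:
            tokens.append("_" + frag + "_")
        else:
            chars = list(frag.strip().replace(" ", ""))
            if chars:
                chars[0] = "_" + chars[0]
                chars[-1] = chars[-1] + "_"
                tokens.extend(chars)
    return " ".join(tokens)

print(written_side_tokens("jan 30, 2005"))  # -> _jan_ _3 0 , 2 0 0 5_

The printed result matches the tokenized example used in the tutorial notebook added later in this patch.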
diff --git a/nemo/collections/nlp/data/text_normalization_as_tagging/tagging.py b/nemo/collections/nlp/data/text_normalization_as_tagging/tagging.py index 6b17377b0225..6e27b561614e 100644 --- a/nemo/collections/nlp/data/text_normalization_as_tagging/tagging.py +++ b/nemo/collections/nlp/data/text_normalization_as_tagging/tagging.py @@ -207,9 +207,10 @@ def realize_output(self, tags: List[Tag], semiotic_labels: List[str]) -> Tuple[s output_tokens.append(frag.replace(" ", "").replace("_", "")) else: output_tokens.append(frag.strip().replace("_", "")) - + output_str = " ".join(output_tokens) + output_str = re.sub(r" +", " ", output_str) return ( - " ".join(output_tokens), + output_str, " ".join(self.source_tokens), " ".join(out_tags_without_swap), output_tags_with_swap_str, diff --git a/nemo/collections/nlp/data/text_normalization_as_tagging/utils.py b/nemo/collections/nlp/data/text_normalization_as_tagging/utils.py index 562f4703464c..253f7a41c703 100644 --- a/nemo/collections/nlp/data/text_normalization_as_tagging/utils.py +++ b/nemo/collections/nlp/data/text_normalization_as_tagging/utils.py @@ -15,7 +15,7 @@ import re from itertools import groupby -from typing import Dict, List +from typing import Dict, List, Tuple """Utility functions for Thutmose Tagger.""" @@ -118,3 +118,190 @@ def spoken_preprocessing(spoken: str) -> str: spoken = re.sub(r" фунтом стерлингов", r" фунтом-стерлингов", spoken) return spoken + + +## This function is used only in data preparation (examples/nlp/text_normalization_as_tagging/dataset_preparation) +def get_src_and_dst_for_alignment( + semiotic_class: str, written: str, spoken: str, lang: str +) -> Tuple[str, str, str, str]: + """Tokenize written and spoken span. + Args: + semiotic_class: str - lowercase semiotic class, ex. "cardinal" + written: str - written form, ex. "2015 году" + spoken: str - spoken form, ex. "две тысячи пятнадцатом году" + lang: str - language + Return: + src: str - written part, where digits and foreign letters are tokenized by characters, ex. "2 0 1 5" + dst: str - spoken part tokenized by space, ex. "две тысячи пятнадцатом" + same_begin: str + same_end: str + """ + written = written.casefold() + # ATTENTION!!! This is INPUT transformation! Need to do the same at inference time!
+ spoken = spoken_preprocessing(spoken) + + # remove same fragments at the beginning or at the end of spoken and written form + written_parts = written.split() + spoken_parts = spoken.split() + same_from_begin = 0 + same_from_end = 0 + for i in range(min(len(written_parts), len(spoken_parts))): + if written_parts[i] == spoken_parts[i]: + same_from_begin += 1 + else: + break + for i in range(min(len(written_parts), len(spoken_parts))): + if written_parts[-i - 1] == spoken_parts[-i - 1]: + same_from_end += 1 + else: + break + same_begin = written_parts[0:same_from_begin] + same_end = [] + if same_from_end == 0: + written = " ".join(written_parts[same_from_begin:]) + spoken = " ".join(spoken_parts[same_from_begin:]) + else: + written = " ".join(written_parts[same_from_begin:-same_from_end]) + spoken = " ".join(spoken_parts[same_from_begin:-same_from_end]) + same_end = written_parts[-same_from_end:] + + fragments = list(split_text_by_isalpha(written)) + written_tokens = [] + for frag in fragments: + if frag.isalpha(): + if semiotic_class == "plain" or semiotic_class == "letters" or semiotic_class == "electronic": + chars = list(frag.strip()) + chars[0] = "_" + chars[0] # prepend first symbol of a word with underscore + chars[-1] = chars[-1] + "_" # append underscore to the last symbol + written_tokens += chars + else: + written_tokens.append("_" + frag + "_") + else: + chars = list(frag.strip().replace(" ", "")) + if len(chars) > 0: + chars[0] = "_" + chars[0] # prepend first symbol of a non-alpha fragment with underscore + chars[-1] = chars[-1] + "_" # append underscore to the last symbol of a non-alpha fragment + written_tokens += chars + written_str = " ".join(written_tokens) + + # _н_ _._ _г_ _._ => _н._ _г._ + written_str = re.sub( + r"([abcdefghijklmnopqrstuvwxyzабвгдеёжзийклмнопрстуфхцчшщъыьэюя])_ _\._", r"\g<1>._", written_str + ) + # _тыс_ _. $ => _тыс._ _$ + written_str = re.sub( + r"([abcdefghijklmnopqrstuvwxyzабвгдеёжзийклмнопрстуфхцчшщъыьэюя])_ _\. 
([^_])]", r"\g<1>._ _\g<2>", written_str + ) + + if semiotic_class == "ordinal": + # _8 2 -_ _ом_ => _8 2-ом_ + written_str = re.sub( + r"([\d]) -_ _([abcdefghijklmnopqrstuvwxyzабвгдеёжзийклмнопрстуфхцчшщъыьэюя]+)_", + r"\g<1>-\g<2>_", + written_str, + ) + # _8 8_ _й_ _8 8й_ + written_str = re.sub( + r"([\d])_ _([abcdefghijklmnopqrstuvwxyzабвгдеёжзийклмнопрстуфхцчшщъыьэюя]+)_", r"\g<1>\g<2>_", written_str + ) + + if semiotic_class == "cardinal": + # _2 5 -_ _ти_ => _2 5-ти_ + written_str = re.sub(r"([\d]) -_ _(ти)_", r"\g<1>-\g<2>_", written_str) + written_str = re.sub(r"([\d]) -_ _(и)_", r"\g<1>-\g<2>_", written_str) + written_str = re.sub(r"([\d]) -_ _(мя)_", r"\g<1>-\g<2>_", written_str) + written_str = re.sub(r"([\d]) -_ _(ех)_", r"\g<1>-\g<2>_", written_str) + + # _i b m_ _'_ _s_ => _i b m's_ + if lang == "en": + written_str = re.sub(r"_ _'_ _s_", r"'s_", written_str) + + if semiotic_class == "date" and lang == "en": + # _1 9 8 0_ _s_ => _1 9 8 0s_ + written_str = re.sub(r"([\d])_ _s_", r"\g<1>s_", written_str) + # _1 9 5 0 '_ _s_ => _1 9 5 0's_ + written_str = re.sub(r"([\d]) '_ _s_", r"\g<1>'s_", written_str) + # _wednesday_ _2 6_ _th_ _september_ _2 0 1 2_ => _wednesday_ _2 6th_ _september_ _2 0 1 2_ + written_str = re.sub(r"([\d])_ _th_", r"\g<1>th_", written_str) + # _wednesday_ _may_ _2 1_ _st_ _, 2 0 1 4_ => _wednesday_ _may_ _2 1st_ _, 2 0 1 4_ + written_str = re.sub(r"([\d])_ _st_", r"\g<1>st_", written_str) + # _wednesday_ _2 3_ _rd_ _july_ _2 0 1 4_ => _wednesday_ _2 3rd_ _july_ _2 0 1 4_ + written_str = re.sub(r"([\d])_ _rd_", r"\g<1>rd_", written_str) + # _wednesday_ _2 2_ _nd_ _july_ _2 0 1 4_ => _wednesday_ _2 2nd_ _july_ _2 0 1 4_ + written_str = re.sub(r"([\d])_ _nd_", r"\g<1>nd_", written_str) + + written_str = re.sub(r"_mon_ _\. ", r"_mon._ ", written_str) + written_str = re.sub(r"_tue_ _\. ", r"_tue._ ", written_str) + written_str = re.sub(r"_wen_ _\. ", r"_wen._ ", written_str) + written_str = re.sub(r"_thu_ _\. ", r"_thu._ ", written_str) + written_str = re.sub(r"_fri_ _\. ", r"_fri._ ", written_str) + written_str = re.sub(r"_sat_ _\. ", r"_sat._ ", written_str) + written_str = re.sub(r"_sun_ _\. ", r"_sun._ ", written_str) + + written_str = re.sub(r"_jan_ _\. ", r"_jan._ ", written_str) + written_str = re.sub(r"_feb_ _\. ", r"_feb._ ", written_str) + written_str = re.sub(r"_mar_ _\. ", r"_mar._ ", written_str) + written_str = re.sub(r"_apr_ _\. ", r"_apr._ ", written_str) + written_str = re.sub(r"_may_ _\. ", r"_may._ ", written_str) + written_str = re.sub(r"_jun_ _\. ", r"_jun._ ", written_str) + written_str = re.sub(r"_jul_ _\. ", r"_jul._ ", written_str) + written_str = re.sub(r"_aug_ _\. ", r"_aug._ ", written_str) + written_str = re.sub(r"_sep_ _\. ", r"_sep._ ", written_str) + written_str = re.sub(r"_oct_ _\. ", r"_oct._ ", written_str) + written_str = re.sub(r"_nov_ _\. ", r"_nov._ ", written_str) + written_str = re.sub(r"_dec_ _\. ", r"_dec._ ", written_str) + + if semiotic_class == "date" and lang == "ru": + # _1 8 . 0 8 . 2 0 0 1_ => _1 8_ .08. _2 0 0 1_ + # _1 8 / 0 8 / 2 0 0 1_ => _1 8_ /08/ _2 0 0 1_ + # _1 8 - 0 8 - 2 0 0 1_ => _1 8_ -08- _2 0 0 1_ + written_str = re.sub(r"([\d]) \. ([01]) ([0123456789]) \. ([\d])", r"\g<1>_ .\g<2>\g<3>. _\g<4>", written_str) + written_str = re.sub(r"([\d]) / ([01]) ([0123456789]) / ([\d])", r"\g<1>_ /\g<2>\g<3>/ _\g<4>", written_str) + written_str = re.sub(r"([\d]) - ([01]) ([0123456789]) - ([\d])", r"\g<1>_ -\g<2>\g<3>- _\g<4>", written_str) + # _1 8 . 8 . 2 0 0 1_ => _1 8_ .8. 
_2 0 0 1_ + # _1 8 / 8 / 2 0 0 1_ => _1 8_ /8/ _2 0 0 1_ + # _1 8 - 8 - 2 0 0 1_ => _1 8_ -8- _2 0 0 1_ + written_str = re.sub(r"([\d]) \. ([123456789]) \. ([\d])", r"\g<1>_ .\g<2>. _\g<3>", written_str) + written_str = re.sub(r"([\d]) / ([123456789]) / ([\d])", r"\g<1>_ /\g<2>/ _\g<3>", written_str) + written_str = re.sub(r"([\d]) - ([123456789]) - ([\d])", r"\g<1>_ -\g<2>- _\g<3>", written_str) + + if semiotic_class == "money": + # if a span start with currency, move it to the end + # "_$ 2 5_" => "_2 5_ _$<<" #<< means "at post-processing move to the beginning of th semiotic span" + written_str = re.sub( + r"^(_[^0123456789abcdefghijklmnopqrstuvwxyzабвгдеёжзийклмнопрстуфхцчшщъыьэюя]) ([\d].*)$", + r"_\g<2> \g<1><<", + written_str, + ) + + # "_us_ _$ 7 0 0_" => "_us__$ 7 0 0_" + written_str = re.sub(r"^_us_ _\$ ([\d].*)$", r"_\g<1> _us__$<<", written_str) + + # "_2 5 $_" => "_2 5_ _$_" #insert space between last digit and dollar sign + written_str = re.sub( + r"([\d]) ([^0123456789abcdefghijklmnopqrstuvwxyzабвгдеёжзийклмнопрстуфхцчшщъыьэюя_]_)", + r"\g<1>_ _\g<2>", + written_str, + ) + + if semiotic_class == "time": + # "_pm_ _1 0_" => "_1 0_ _pm_<<" + written_str = re.sub(r"^(_[ap]m_) (_[\d].*)$", r"\g<2> \g<1><<", written_str) + + # "_8 : 0 0_ _a._ _m._ => _8:00_ _a._ _m._" + # "_1 2 : 0 0_ _a._ _m._ => _1 2:00_ _a._ _m._" + written_str = re.sub(r"(\d) [:.] 0 0_", r"\g<1>:00_", written_str) + + # "_2 : 4 2 : 4 4_" => "_2: 4 2: 4 4_" + written_str = re.sub(r"(\d) [:.] ", r"\g<1>: ", written_str) + + if semiotic_class == "measure": + # "_6 5 8_ _см_ _³ ._" => " _6 5 8_ _³> _см._" + # > means "at post-processing swap with the next token to the right" + written_str = re.sub( + r"(_[abcdefghijklmnopqrstuvwxyzабвгдеёжзийклмнопрстуфхцчшщъыьэюя.]+_) (_[³²]_?)", + r"\g<2>> \g<1>", + written_str, + ) + + return written_str, spoken, " ".join(same_begin), " ".join(same_end) diff --git a/tutorials/text_processing/ITN_with_Thutmose_Tagger.ipynb b/tutorials/text_processing/ITN_with_Thutmose_Tagger.ipynb new file mode 100644 index 000000000000..50bf88a1e25c --- /dev/null +++ b/tutorials/text_processing/ITN_with_Thutmose_Tagger.ipynb @@ -0,0 +1,1047 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "VFOY_ljrReXk" + }, + "outputs": [], + "source": [ + "\"\"\"\n", + "You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.\n", + "\n", + "Instructions for setting up Colab are as follows:\n", + "1. Open a new Python 3 notebook.\n", + "2. Import this notebook from GitHub (File -> Upload Notebook -> \"GITHUB\" tab -> copy/paste GitHub URL)\n", + "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", + "4. Run this cell to set up dependencies.\n", + "\"\"\"\n", + "\n", + "import os\n", + "\n", + "# install NeMo\n", + "BRANCH = 'r1.9.0'\n", + "\n", + "GITHUB_ACCOUNT = 'NVIDIA' # change this if using a fork\n", + "\n", + "# either provide a path to local NeMo repository with NeMo already installed or git clone\n", + "\n", + "# option #1: local path to NeMo repo with NeMo already installed\n", + "NEMO_DIR_PATH = \"NeMo\"\n", + "\n", + "# option #2: download NeMo repo\n", + "if 'google.colab' in str(get_ipython()) or not os.path.exists(NEMO_DIR_PATH):\n", + " ! git clone -b $BRANCH https://github.com/{GITHUB_ACCOUNT}/NeMo\n", + " % cd NeMo\n", + " ! 
python -m pip install git+https://github.com/{GITHUB_ACCOUNT}/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", " % cd .." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "V8RfIztARxKH" }, "outputs": [], "source": [ "# If you're not using Colab, you might need to upgrade jupyter notebook to avoid the following error:\n", "# 'ImportError: IProgress not found. Please update jupyter and ipywidgets.'\n", "\n", "! pip install ipywidgets\n", "! jupyter nbextension enable --py widgetsnbextension\n", "\n", "# Please restart the kernel after running this cell" ] }, { "cell_type": "markdown", "metadata": { "id": "oaTOPJHhTteF" }, "source": [ "# Task Description\n", "**Inverse text normalization (ITN)** is an important post-processing step within an automatic speech recognition (ASR) system. \n", "ITN transforms spoken-domain text into its written form:\n", "\n", "> **Input:** \"on may third we paid one hundred and twenty three dollars\"\n", "\n", "> **Output:** \"on may 3 we paid \\$123\".\n" ] }, { "cell_type": "markdown", "metadata": { "id": "sxo-kHP7frEX" }, "source": [ "# Thutmose Tagger approach\n", "We aim to do the following:\n", "1. Align ITN expressions from the [Google Text normalization dataset](https://www.kaggle.com/richardwilliamsproat/text-normalization-for-english-russian-and-polish) on a granular level using [GIZA++](https://github.com/moses-smt/giza-pp), to get a monotonic one-to-one correspondence between each *spoken word* and the corresponding *fragments* in written form. \n", "2. Get a restricted vocabulary of target fragments (tags) that can cover most spoken-written pair conversions.\n", "3. Build a training dataset, where the input is the sentence in spoken form and the output is a tag for each input word. \n", "4. Train a neural token-classification model (see Figure below). \n", "5. Apply a simple postprocessing procedure to the tag sequence to get the final output." ] }, { "cell_type": "markdown", "source": [ "![Thutmose Tagger Architecture](images/thutmose_tagger_architecture.png)" ], "metadata": { "id": "RG403l1gKyRy" } }, { "cell_type": "markdown", "metadata": { "id": "aMPeNtAracI9" }, "source": [ "# Dataset\n", "\n", "The full English part of the [Google Text normalization dataset](https://www.kaggle.com/richardwilliamsproat/text-normalization-for-english-russian-and-polish) consists of 1.1 billion words. For this tutorial we use only a small subset of it.\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "EqnuAgNNcVY-" }, "outputs": [], "source": [ "! wget \"https://multilangaudiosamples.s3.us-east-2.amazonaws.com/en_data_small.zip\" \".\"\n", "! unzip en_data_small" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "CkJ3LLaxRNFh" }, "outputs": [], "source": [ "## actually we do not need separate dev and test data in this tutorial, so just copy it \n", "!cp -r en_data_small/test en_data_small/dev" ] }, { "cell_type": "markdown", "metadata": { "id": "HLoXgnpMVACe" }, "source": [ "\n", "The dataset contains unnormalized (i.e. written form) and normalized (i.e. spoken form) sentence pairs that are aligned *on a phrase-level*. 
The normalized text is synthetic - obtained with the [Kestrel TTS text normalization system](https://www.cambridge.org/core/journals/natural-language-engineering/article/abs/kestrel-tts-text-normalization-system/F0C18A3F596B75D83B75C479E23795DA), so it is not considered 100% correct.\n",
+    "\n",
+    "```\n",
+    "PLAIN\tRetrieved\t\n",
+    "DATE\t18 April 2013\tthe eighteenth of april twenty thirteen\n",
+    "PUNCT\t.\tsil\n",
+    "\n",
+    "PLAIN\tNeuhorst\t\n",
+    "PUNCT\t(\tsil\n",
+    "PLAIN\tCanada\t\n",
+    "DATE\t2006\ttwo thousand six\n",
+    "PLAIN\tCensus\t\n",
+    "PLAIN\tpopulation\t\n",
+    "CARDINAL\t126\tone hundred twenty six\n",
+    "PUNCT\t)\tsil\n",
+    "PLAIN\tis\t\n",
+    "PLAIN\ta\t\n",
+    "PLAIN\tsmall\t\n",
+    "PLAIN\thamlet\t\n",
+    "PLAIN\tin\t\n",
+    "PLAIN\tSaskatchewan\t\n",
+    "PUNCT\t,\tsil\n",
+    "PLAIN\tCanada\t\n",
+    "PLAIN\tabout\t\n",
+    "CARDINAL\t30\tthirty\n",
+    "PLAIN\tminutes\t\n",
+    "PLAIN\tnorth\t\n",
+    "PLAIN\tof\t\n",
+    "PLAIN\tSaskatoon\t\n",
+    "PUNCT\t.\tsil\n",
+    "\n",
+    "```\n",
+    "\n",
+    "The following classes appear in the dataset:\n",
+    "* ADDRESS\n",
+    "* CARDINAL\n",
+    "* DATE\n",
+    "* DECIMAL\n",
+    "* DIGIT\n",
+    "* ELECTRONIC\n",
+    "* FRACTION\n",
+    "* LETTERS\n",
+    "* MEASURE\n",
+    "* MONEY\n",
+    "* ORDINAL\n",
+    "* PLAIN\n",
+    "* PUNCT\n",
+    "* TELEPHONE\n",
+    "* TIME\n",
+    "* VERBATIM\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "# 1. Align ITN expressions on a granular level"
+   ],
+   "metadata": {
+    "id": "rewQY1pbPeq8"
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Let's download and compile GIZA++, as we will need it soon."
+   ],
+   "metadata": {
+    "id": "5cLXx7qdPpUK"
+   }
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "LNLjIDUJdY5f"
+   },
+   "outputs": [],
+   "source": [
+    "! git clone https://github.com/moses-smt/giza-pp.git giza-pp\n",
+    "%cd giza-pp\n",
+    "! ls\n",
+    "! make\n",
+    "%cd .."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Do some imports."
+   ],
+   "metadata": {
+    "id": "2AfIeiu_P0Ik"
+   }
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "mz8_O4kfS0fH"
+   },
+   "outputs": [],
+   "source": [
+    "from nemo.collections import nlp as nemo_nlp\n",
+    "from nemo.utils.exp_manager import exp_manager\n",
+    "import nemo\n",
+    "\n",
+    "import wget\n",
+    "import torch\n",
+    "import pytorch_lightning as pl\n",
+    "from omegaconf import OmegaConf\n",
+    "import pandas as pd"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "uYW_qsDgkhCw"
+   },
+   "source": [
+    "First we need to prepare the input data for the aligner (GIZA++).\n",
+    "\n",
+    "We regard the corpus of ITN phrase-pairs as a parallel corpus. Parallel means that each pair has an equivalent meaning, though the two sides may consist of different numbers of tokens; the task of an aligner is to find which source tokens correspond to which target tokens.\n",
+    "The spoken phrase is tokenized by word boundary, while the written phrase is tokenized as follows: \n",
+    "1. All alphabetic sequences are separate tokens.\n",
+    "2. In numeric sequences each character is a separate token.\n",
+    "3. All non-alphanumeric characters are separate tokens.\n",
+    "4. We add an underscore symbol to mark the beginning and end of a sequence for future detokenization. 
\n", + "\n", + "Example\n", + "> **Spoken:** `january thirtieth two thousand five`\n", + "\n", + "> **Written initial:** `jan 30, 2005`\n", + "\n", + "> **Written tokenized**: `_jan_ _3 0 , 2 0 0 5_`" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "miXYxOv_mNVo" + }, + "source": [ + "The script [prepare_corpora_for_alignment.py](https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/text_normalization_as_tagging/dataset_preparation/prepare_corpora_for_alignment.py) prepares the described parallel corpora. It extracts all unique ITN phrase-pairs from the Google TN dataset, tokenizes them as described above and stores in separate folders for each semiotic class. It also generates a bash script for running the alignment. At the end it prints how many examples it has found:\n", + "```\n", + "content/alignment/punct has 920953 instances\n", + "content/alignment/date has 150499 instances\n", + "content/alignment/letters has 68340 instances\n", + "content/alignment/cardinal has 61029 instances\n", + "...\n", + "``` " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "_A12y5zNn4O0" + }, + "outputs": [], + "source": [ + "WORK_DIR=!pwd # returns array containing a single path, \n", + "WORK_DIR=WORK_DIR[0]\n", + "\n", + "CORPUS_LANG=\"en\"\n", + "if 'google.colab' in str(get_ipython()) or not os.path.exists(NEMO_DIR_PATH):\n", + " NEMO_PATH=WORK_DIR + \"/NeMo\"\n", + "else:\n", + " NEMO_PATH=NEMO_DIR_PATH\n", + "GIZA_BIN_DIR=WORK_DIR + \"/giza-pp/GIZA++-v2\"\n", + "MCKLS_BINARY=WORK_DIR + \"/giza-pp/mkcls-v2/mkcls\"\n", + "CORPUS_DIR=WORK_DIR + \"/en_data_small\"\n", + "ALIGNMENT_DIR=WORK_DIR + \"/alignment\"\n", + "\n", + "!mkdir {ALIGNMENT_DIR}" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "BguRSoIXesFx" + }, + "outputs": [], + "source": [ + "!python {NEMO_PATH}/examples/nlp/text_normalization_as_tagging/dataset_preparation/prepare_corpora_for_alignment.py \\\n", + " --data_dir={CORPUS_DIR} \\\n", + " --out_dir={ALIGNMENT_DIR} \\\n", + " --giza_dir={GIZA_BIN_DIR} \\\n", + " --mckls_binary={MCKLS_BINARY} \\\n", + " --lang={CORPUS_LANG}" + ] + }, + { + "cell_type": "markdown", + "source": [ + "Let's exclude punct class, as our itn task doesn't require to restore punctuation marks" + ], + "metadata": { + "id": "v8LscfJrLUeg" + } + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "VNOQ4nW2yF6I" + }, + "outputs": [], + "source": [ + "!rm -r {ALIGNMENT_DIR}/punct\n" + ] + }, + { + "cell_type": "markdown", + "source": [ + "Let's run GIZA++ alignment. \n", + "In this tutorial we only work with three semiotic classes: date, letters and cardinal (in real setting all classes are used, excluding punct).\n", + "\n", + "**Attention**: the environment variable USER should be defined with any value, otherwise GIZA++ ends with segmenation fault. " + ], + "metadata": { + "id": "uUQMhEKGT7gv" + } + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "UPxcPu0_Xh2Y" + }, + "outputs": [], + "source": [ + "!chmod +x {ALIGNMENT_DIR}/date/run.sh\n", + "!chmod +x {ALIGNMENT_DIR}/letters/run.sh\n", + "!chmod +x {ALIGNMENT_DIR}/cardinal/run.sh" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "gOCv-ctbU3Rv" + }, + "outputs": [], + "source": [ + "## It is necessary to specify environment variable USER=, otherwise GIZA++ terminates with a segfault \n", + "\n", + "%cd {ALIGNMENT_DIR}/date\n", + "! 
export USER=\"user\"; ./run.sh\n",
+    "%cd ../..\n",
+    "\n",
+    "%cd {ALIGNMENT_DIR}/letters\n",
+    "! export USER=\"user\"; ./run.sh\n",
+    "%cd ../..\n",
+    "\n",
+    "%cd {ALIGNMENT_DIR}/cardinal\n",
+    "! export USER=\"user\"; ./run.sh\n",
+    "%cd ../.."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "GIZA++ will generate many files in our class folders, but we need only the two files with final alignments, those with the suffix `A3.final`. The two files correspond to the alignments produced by two GIZA++ runs - direct and reverse (switching the source and target corpus). This is a common practice: it allows us to find safer alignment points, i.e. tokens that were aligned to one another in both runs (the next cell sketches the idea). The script [extract_giza_alignments.py](https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/text_normalization_as_tagging/dataset_preparation/extract_giza_alignments.py) heuristically combines these two GIZA++ alignments. It also applies a bunch of regular expressions to correct some alignment mistakes."
+   ],
+   "metadata": {
+    "id": "ueJYVF0cU3ic"
+   }
+  },
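+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# A toy sketch of intersecting the direct and reverse alignment runs.\n",
+    "# This is an illustration with made-up index pairs, not the heuristics\n",
+    "# actually implemented in extract_giza_alignments.py.\n",
+    "direct_run = {(0, 0), (1, 2), (2, 1), (3, 3)}   # (source_idx, target_idx) pairs\n",
+    "reverse_run = {(0, 0), (2, 1), (3, 4)}          # reverse run, axes swapped back\n",
+    "\n",
+    "# tokens aligned to one another in both runs are the \"safe\" alignment points\n",
+    "safe_points = direct_run & reverse_run\n",
+    "print(sorted(safe_points))  # [(0, 0), (2, 1)]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "j5WpPkzHNICP"
+   },
+   "outputs": [],
+   "source": [
+    "! python {NEMO_PATH}/examples/nlp/text_normalization_as_tagging/dataset_preparation/extract_giza_alignments.py \\\n",
+    "  --mode=itn \\\n",
+    "  --giza_dir={ALIGNMENT_DIR}/date \\\n",
+    "  --giza_suffix=\"A3.final\" \\\n",
+    "  --out_filename=itn.out \\\n",
+    "  --lang={CORPUS_LANG}\n",
+    "! python {NEMO_PATH}/examples/nlp/text_normalization_as_tagging/dataset_preparation/extract_giza_alignments.py \\\n",
+    "  --mode=itn \\\n",
+    "  --giza_dir={ALIGNMENT_DIR}/letters \\\n",
+    "  --giza_suffix=\"A3.final\" \\\n",
+    "  --out_filename=itn.out \\\n",
+    "  --lang={CORPUS_LANG}\n",
+    "! python {NEMO_PATH}/examples/nlp/text_normalization_as_tagging/dataset_preparation/extract_giza_alignments.py \\\n",
+    "  --mode=itn \\\n",
+    "  --giza_dir={ALIGNMENT_DIR}/cardinal \\\n",
+    "  --giza_suffix=\"A3.final\" \\\n",
+    "  --out_filename=itn.out \\\n",
+    "  --lang={CORPUS_LANG}"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "When we prepared the input corpus of ITN pairs for GIZA++, we deduplicated the pairs and stored their frequencies in a separate file `freq`. Now let's append the frequencies to the resulting alignments."
+   ],
+   "metadata": {
+    "id": "vpqiKrS6XBlP"
+   }
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "fxZ5jjUPlOFv"
+   },
+   "outputs": [],
+   "source": [
+    "!paste -d\"\\t\" {ALIGNMENT_DIR}/date/freq {ALIGNMENT_DIR}/date/itn.out > {ALIGNMENT_DIR}/date/itn.out2\n",
+    "!paste -d\"\\t\" {ALIGNMENT_DIR}/letters/freq {ALIGNMENT_DIR}/letters/itn.out > {ALIGNMENT_DIR}/letters/itn.out2\n",
+    "!paste -d\"\\t\" {ALIGNMENT_DIR}/cardinal/freq {ALIGNMENT_DIR}/cardinal/itn.out > {ALIGNMENT_DIR}/cardinal/itn.out2"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Let's look at what we get. 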
The output should look like this:\n",
+    "![Top of file with aligned expressions](images/thutmose_tagger_alignment_top.png)\n",
+    "...\n",
+    "![Bottom of file with aligned expressions](images/thutmose_tagger_alignment_bottom.png)\n"
+   ],
+   "metadata": {
+    "id": "yzt87qeEX5o0"
+   }
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "tJNFvVhG4SMo"
+   },
+   "outputs": [],
+   "source": [
+    "df = pd.read_csv(ALIGNMENT_DIR + \"/cardinal/itn.out2\", sep=\"\\t\", header=None)\n",
+    "df.columns = [\"freq\", \"verdict\", \"spoken\", \"written initial tokens\", \"left-side alignment\", \"right-side alignment\"]\n",
+    "is_spoken_multiword = df[\"spoken\"].apply(lambda x: \" \" in x)\n",
+    "df2 = df[is_spoken_multiword].sort_values(\"freq\", ascending=False).reset_index(drop=True)\n",
+    "df2.head(20)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "cEcXh1MzDWFy"
+   },
+   "outputs": [],
+   "source": [
+    "df2.tail(10)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "# 2. Get a restricted vocabulary of target fragments (tags)\n",
+    "\n",
+    "There can be some inconsistencies in the automatic alignments, but nevertheless we now have a **one-to-one correspondence** between input words and output fragments. Let's collect all fragments in a vocabulary! The output should look like this:\n",
+    "![Tag vocabulary](images/thutmose_tagger_tag_vocabulary.png)\n",
+    "\n"
+   ],
+   "metadata": {
+    "id": "OdEuRQKXYG3D"
+   }
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "89zwtEQmQJZ1"
+   },
+   "outputs": [],
+   "source": [
+    "! python {NEMO_PATH}/examples/nlp/text_normalization_as_tagging/dataset_preparation/prepare_corpora_after_alignment.py \\\n",
+    "  --mode=get_replacement_vocab \\\n",
+    "  --giza_dir={ALIGNMENT_DIR} \\\n",
+    "  --alignment_filename=itn.out2 \\\n",
+    "  --data_dir=\"\" \\\n",
+    "  --vocab_filename={WORK_DIR}/replacement_vocab_full.txt \\\n",
+    "  --out_filename=\"\" \\\n",
+    "  --lang={CORPUS_LANG}\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "tx4gXO7CGzyQ"
+   },
+   "outputs": [],
+   "source": [
+    "df = pd.read_csv(\"replacement_vocab_full.txt.cardinal\", sep=\"\\t\", header=None)\n",
+    "df.columns = [\"replacement tag\", \"freq\"]\n",
+    "df"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Tags with low frequencies are likely to be derived from sporadic alignment mistakes, so let's discard them and put together the tags from all our semiotic classes."
+   ],
+   "metadata": {
+    "id": "Ts_G3TnLEQn4"
+   }
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "2TKbJELTFFXG"
+   },
+   "outputs": [],
+   "source": [
+    "! head -n 150 replacement_vocab_full.txt.cardinal > replacement_vocab_cardinal.txt\n",
+    "! head -n 150 replacement_vocab_full.txt.date > replacement_vocab_date.txt\n",
+    "! head -n 150 replacement_vocab_full.txt.letters > replacement_vocab_letters.txt\n",
+    "! cat replacement_vocab_cardinal.txt \\\n",
+    "    replacement_vocab_date.txt \\\n",
+    "    replacement_vocab_letters.txt > replacement_vocab.select.txt\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "x6eEYkVlJDK-"
+   },
+   "source": [
+    "After concatenation the vocabulary file can contain duplicates of the same tags coming from different semiotic classes, but this is not important at this moment. The final vocabulary will be created later."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "DoDHW-p1FUso"
+   },
+   "outputs": [],
+   "source": [
+    "! wc -l replacement_vocab.select.txt"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "xcQK5cHQH_NH"
+   },
+   "outputs": [],
+   "source": [
+    "! python {NEMO_PATH}/examples/nlp/text_normalization_as_tagging/dataset_preparation/prepare_corpora_after_alignment.py \\\n",
+    "  --mode=filter_by_vocab \\\n",
+    "  --giza_dir={ALIGNMENT_DIR} \\\n",
+    "  --alignment_filename=itn.out2 \\\n",
+    "  --data_dir=\"\" \\\n",
+    "  --vocab_filename={WORK_DIR}/replacement_vocab.select.txt \\\n",
+    "  --out_filename=itn.select.out \\\n",
+    "  --lang={CORPUS_LANG}\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "yWhCQJ5DLgoZ"
+   },
+   "source": [
+    "The script `prepare_corpora_after_alignment.py --mode=filter_by_vocab` discards examples that are not fully covered by our selected replacement vocabulary. We can see that the number of lines decreases slightly.\n",
+    "```\n",
+    "4997 content/alignment/cardinal/itn.out2\n",
+    "4681 content/alignment/cardinal/itn.select.out\n",
+    "```"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "_nqeRRjmKoWg"
+   },
+   "outputs": [],
+   "source": [
+    "! wc -l {ALIGNMENT_DIR}/cardinal/itn.out2\n",
+    "! wc -l {ALIGNMENT_DIR}/cardinal/itn.select.out\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "kUQwwCkLMKEX"
+   },
+   "source": [
+    "The format of the lines also changes slightly: we add the name of the semiotic class, choose only one alignment (left-side or right-side) based on the class, and remove unnecessary columns.\n",
+    "\n",
+    "![Final alignment](images/thutmose_tagger_final_alignment.png)\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "83Voerh_K8gR"
+   },
+   "outputs": [],
+   "source": [
+    "df = pd.read_csv(ALIGNMENT_DIR + \"/cardinal/itn.select.out\", sep=\"\\t\", header=None)\n",
+    "df.columns = [\"semiotic class\", \"spoken\", \"written initial fragments\", \"alignment\"]\n",
+    "df.head(10)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "C6cAEYNHOKex"
+   },
+   "source": [
+    "# 3. Build training dataset \n",
+    "\n",
+    "Now it's time to create a tag-labeled dataset containing _full sentences_. After the previous step we have a large dictionary of ITN phrase conversions *that we know how to tag*. Once again we loop through the Google TN dataset and process each sentence in the following way:\n",
+    "\n",
+    "* If a sentence contains at least one ITN conversion that is missing from our dictionary, the sentence is discarded.\n",
+    "* Otherwise we assign tags to the input words:\n",
+    "  1. All words outside ITN conversion spans are tagged as `<SELF>`.\n",
+    "  2. 
Tags for words inside ITN spans are taken from the dictionary.\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "DQVCzljrMyHu"
+   },
+   "outputs": [],
+   "source": [
+    "!python {NEMO_PATH}/examples/nlp/text_normalization_as_tagging/dataset_preparation/prepare_corpora_after_alignment.py \\\n",
+    "  --mode=get_labeled_corpus \\\n",
+    "  --giza_dir={ALIGNMENT_DIR} \\\n",
+    "  --alignment_filename=itn.select.out \\\n",
+    "  --data_dir={CORPUS_DIR}/dev \\\n",
+    "  --vocab_filename=\"\" \\\n",
+    "  --out_filename={CORPUS_DIR}/dev.labeled \\\n",
+    "  --lang={CORPUS_LANG}\n",
+    "\n",
+    "!python {NEMO_PATH}/examples/nlp/text_normalization_as_tagging/dataset_preparation/prepare_corpora_after_alignment.py \\\n",
+    "  --mode=get_labeled_corpus \\\n",
+    "  --giza_dir={ALIGNMENT_DIR} \\\n",
+    "  --alignment_filename=itn.select.out \\\n",
+    "  --data_dir={CORPUS_DIR}/train \\\n",
+    "  --vocab_filename=\"\" \\\n",
+    "  --out_filename={CORPUS_DIR}/train.labeled \\\n",
+    "  --lang={CORPUS_LANG}\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "bBfuML8TQrwz"
+   },
+   "source": [
+    "The resulting file consists of three columns:\n",
+    "* input words\n",
+    "* target tags\n",
+    "* semiotic spans (if any)\n",
+    "\n",
+    "The semiotic spans are separated by semicolons; each span consists of the class name and the begin and end positions in terms of input words, e.g. \"DATE 6 9\".\n",
+    "\n",
+    "```\n",
+    "it can be summarized as an error driven transformation based tagger\t<SELF> <SELF> <SELF> <SELF> <SELF> <SELF> <SELF> <SELF> <SELF> <SELF> <SELF>\t\n",
+    "this plan was first enacted in nineteen eighty four and continued to be followed for nineteen years\t<SELF> <SELF> <SELF> <SELF> <SELF> <SELF> _19 8 4_ <SELF> <SELF> <SELF> <SELF> <SELF> <SELF> _19_ <SELF>\tDATE 6 9;CARDINAL 15 16\n",
+    "```\n",
+    "The semiotic spans are used for two purposes:\n",
+    "\n",
+    "1. During the validation step we calculate accuracy w.r.t. semiotic spans. For example, a DATE span is correct if **all** tag predictions inside this span match the ground truth labels.\n",
+    "2. The model has an additional classification head that predicts a semiotic class label for each of the input words. These predictions are used in the post-processing step for better handling of swaps.\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "-TjToCTDN8t8"
+   },
+   "outputs": [],
+   "source": [
+    "! head {CORPUS_DIR}/dev.labeled"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "Bf9Evpn8RWp4"
+   },
+   "source": [
+    "Get the final label vocabulary, based on our labeled corpora. The output file should look like this:\n",
+    "```\n",
+    "KEEP\n",
+    "DELETE\n",
+    "DELETE|_20\n",
+    "DELETE|_19\n",
+    "DELETE|_2\n",
+    "DELETE|_200\n",
+    "DELETE|,20\n",
+    "DELETE|9_\n",
+    "DELETE|9\n",
+    "DELETE|8_\n",
+    "```"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "2RY2pZwEPdlZ"
+   },
+   "outputs": [],
+   "source": [
+    "! python {NEMO_PATH}/examples/nlp/text_normalization_as_tagging/dataset_preparation/get_label_vocab.py \\\n",
+    "  --train_filename={CORPUS_DIR}/train.labeled \\\n",
+    "  --dev_filename={CORPUS_DIR}/dev.labeled \\\n",
+    "  --out_filename={CORPUS_DIR}/label_map.txt\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "9cGtBQSwRj-p"
+   },
+   "outputs": [],
+   "source": [
+    "! head {CORPUS_DIR}/label_map.txt"
+   ]
+  },
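+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We also need a file listing all semiotic classes, one class name per line; it is used by the model's additional semiotic classification head. The next cell simply writes the class names one by one."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "KL4DINweSgUQ"
+   },
+   "outputs": [],
+   "source": [
+    "! echo \"ADDRESS\" > {CORPUS_DIR}/semiotic_classes.txt\n",
+    "! echo \"CARDINAL\" >> {CORPUS_DIR}/semiotic_classes.txt\n",
+    "! 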
echo \"DATE\" >> {CORPUS_DIR}/semiotic_classes.txt\n", + "! echo \"DECIMAL\" >> {CORPUS_DIR}/semiotic_classes.txt\n", + "! echo \"DIGIT\" >> {CORPUS_DIR}/semiotic_classes.txt\n", + "! echo \"ELECTRONIC\" >> {CORPUS_DIR}/semiotic_classes.txt\n", + "! echo \"FRACTION\" >> {CORPUS_DIR}/semiotic_classes.txt\n", + "! echo \"LETTERS\" >> {CORPUS_DIR}/semiotic_classes.txt\n", + "! echo \"MEASURE\" >> {CORPUS_DIR}/semiotic_classes.txt\n", + "! echo \"MONEY\" >> {CORPUS_DIR}/semiotic_classes.txt\n", + "! echo \"ORDINAL\" >> {CORPUS_DIR}/semiotic_classes.txt\n", + "! echo \"PLAIN\" >> {CORPUS_DIR}/semiotic_classes.txt\n", + "! echo \"PUNCT\" >> {CORPUS_DIR}/semiotic_classes.txt\n", + "! echo \"TELEPHONE\" >> {CORPUS_DIR}/semiotic_classes.txt\n", + "! echo \"TIME\" >> {CORPUS_DIR}/semiotic_classes.txt\n", + "! echo \"VERBATIM\" >> {CORPUS_DIR}/semiotic_classes.txt\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "b7JrZxs-WTS8" + }, + "outputs": [], + "source": [ + "! mkdir {WORK_DIR}/datasets\n", + "\n", + "! cp {CORPUS_DIR}/label_map.txt {WORK_DIR}/datasets/label_map.txt\n", + "! cp {CORPUS_DIR}/semiotic_classes.txt {WORK_DIR}/datasets/semiotic_classes.txt\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "E-fXGmAb63z0" + }, + "source": [ + "Now the file `train.labeled` contains all sentences from initial Google TN data, that we have been able to cover with out tag dictionary. \n", + "From it we can create different datasets for our neural model, trying different sizes and sampling strategies.\n", + "\n", + "Let's create a toy dataset of 5'000 sentences for train set and 5'000 sentences for dev set. Test set is not used - see Evaluation section below." + ] + }, + { + "cell_type": "code", + "source": [ + "DATASET = WORK_DIR + \"/datasets/itn_sample10k\"\n", + "! mkdir {DATASET}\n", + "!head -n 5000 {CORPUS_DIR}/train.labeled > {DATASET}/train.tsv\n", + "!head -n 5000 {CORPUS_DIR}/dev.labeled > {DATASET}/valid.tsv\n", + "!cp {DATASET}/valid.tsv {DATASET}/test.tsv\n" + ], + "metadata": { + "id": "KFwzGmuJlC0N" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "# 4. Train a token classifier neural model\n", + "Now let's run training" + ], + "metadata": { + "id": "X1vWojxlmffT" + } + }, + { + "cell_type": "code", + "source": [ + "! python {NEMO_PATH}/examples/nlp/text_normalization_as_tagging/normalization_as_tagging_train.py \\\n", + " lang=en \\\n", + " data.validation_ds.data_path={DATASET}/valid.tsv \\\n", + " data.train_ds.data_path={DATASET}/train.tsv \\\n", + " model.language_model.pretrained_model_name=bert-base-uncased \\\n", + " model.label_map={WORK_DIR}/datasets/label_map.txt \\\n", + " model.semiotic_classes={WORK_DIR}/datasets/semiotic_classes.txt \\\n", + " trainer.accelerator=gpu \\\n", + " trainer.max_epochs=1\n" + ], + "metadata": { + "id": "APBdPcihmFBa" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "Each validation step generates three classification reports where rows correspond to different semiotic classes, `support` column is how many examples of this class occurred in the target of validation set, and `recall` column is the classifier **accuracy** on this class, i.e. percentage of _whole examples_ whose predicted tags match the target.\n", + "\n", + "1. Tag classification report. `PLAIN` class includes words that are tagged as ``.\n", + "2. Tag classification report for **multiword** examples only. 
They are less trivial, and it is harder to achieve high accuracy on them.\n",
+    "3. Classification report for semiotic classes."
+   ],
+   "metadata": {
+    "id": "kjZU6fkvS0V5"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "# The log can be found in the nemo_experiments folder\n",
+    "! cat nemo_experiments/training/*/nemo_log_globalrank-0_localrank-0.txt"
+   ],
+   "metadata": {
+    "id": "gO1nez6AWJeW"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "# Inference"
+   ],
+   "metadata": {
+    "id": "hX-9t7XBqJbo"
+   }
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Let's run inference with our toy model.\n",
+    "First, copy the model that we've just trained."
+   ],
+   "metadata": {
+    "id": "9x80qIKCsBQ7"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "!cp nemo_experiments/training/*/checkpoints/training.nemo ."
+   ],
+   "metadata": {
+    "id": "dYfyklDTXuUM"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Generate some input sentences."
+   ],
+   "metadata": {
+    "id": "pVCV2Hchs-gG"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "! echo \"on the ninth of may four days after her arrival at new orleans west carnifax was decommissioned and returned to the u s s b\" > test_sent.txt\n",
+    "! echo \"retrieved the fourth of october twenty fifteen\" >> test_sent.txt"
+   ],
+   "metadata": {
+    "id": "30KlsQ6uY6vu"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "Run the inference."
+   ],
+   "metadata": {
+    "id": "uqyBEKn-tDXe"
+   }
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "!python {NEMO_PATH}/examples/nlp/text_normalization_as_tagging/normalization_as_tagging_infer.py \\\n",
+    "  pretrained_model=./training.nemo \\\n",
+    "  inference.from_file=./test_sent.txt \\\n",
+    "  inference.out_file=./test_sent.output"
+   ],
+   "metadata": {
+    "id": "SDSm6lg6ZOM_"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "source": [
+    "! cat test_sent.output"
+   ],
+   "metadata": {
+    "id": "jrGJb9DcZ83E"
+   },
+   "execution_count": null,
+   "outputs": []
+  },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "The inference output consists of 5 columns:\n",
+    "\n",
+    "1. Final output text.\n",
+    "2. Input text.\n",
+    "3. Sequence of predicted tags.\n",
+    "4. Sequence of tags after post-processing (some swaps may be applied).\n",
+    "5. 
Sequence of predicted semiotic classes - one class for each input word.\n", + "\n", + "```\n", + "on ninth may four days after her arrival at new orleans west carnifax was decommissioned and returned to the u s s b\ton the ninth of may four days after her arrival at new orleans west carnifax was decommissioned and returned to the u s s b\t \t \tPLAIN DATE DATE DATE DATE DATE PLAIN PLAIN PLAIN PLAIN PLAIN PLAIN PLAIN PLAIN PLAIN PLAIN PLAIN PLAIN PLAIN PLAIN PLAIN LETTERS LETTERS LETTERS LETTERS\n", + "retrieved 20 october 20 20\tretrieved the fourth of october twenty fifteen\t _20 _20 _20\t _20 _20 _20\tPLAIN DATE DATE DATE DATE DATE DATE```\n" + ], + "metadata": { + "id": "eYqtL7waaiZS" + } + }, + { + "cell_type": "markdown", + "source": [ + "We see that our toy model works and even manages to replace some numbers.\n", + "\n", + "To train a full-fledged model, you need more data.\n", + "\n", + "See also the scripts for the whole pipeline:\n", + "\n", + "> [prepare_dataset_en.sh](https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/text_normalization_as_tagging/prepare_dataset_en.sh)\n", + "\n", + "> [normalization_as_tagging_train.py](https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/text_normalization_as_tagging/normalization_as_tagging_train.py)\n", + "\n", + "> [run_infer.sh](https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/text_normalization_as_tagging/run_infer.sh)\n", + "\n" + ], + "metadata": { + "id": "AY9sQCIcUEGO" + } + } + ], + "metadata": { + "colab": { + "collapsed_sections": [], + "name": "ITN_with_Thutmose_Tagger.ipynb", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + }, + "language_info": { + "name": "python" + }, + "accelerator": "GPU" + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/tutorials/text_processing/images/thutmose_tagger_alignment_bottom.png b/tutorials/text_processing/images/thutmose_tagger_alignment_bottom.png new file mode 100644 index 0000000000000000000000000000000000000000..03caff6667c21ad34a2ab2a3071c20c53c55d5af GIT binary patch literal 18489 zcmbrl1yCGaxBeRvENBu!a0vt_xC9S@;O_434ub{=?g4_kySoMm4ucOq0fNH-gA6bX zb9vACzI)ECx~J}```=YP(_OWDb4wn*w2w1m_yxJSwmIriK95w(a|T3+}|Q+v(oJiUI&{J zO?kegp@|7$ea^Y}8x562(m>T|GET~ZjQAaz?priM;tu|HjHwAfZM2L-{yufjN@b9S zrZ)ThJU0R!D2fO^JgTh~^*mY-KsI?86-U2%{hv)$$;JdJFZr(~@a-2h&40E+@&S|o zyAz%%N94a6V+05Bzq_YM5aax-JI(le+dJDk=RU;JBh|lRzqV9MrdK>ch7Bf(|9cmK z@Bar!z(N0aBt2FSnd*!xIu@^6dQ#hpCY(F3P$ zpdlegrsXu)iOU3n2WKxdX@6YzjJNpCRCLu6FWn7%xGmAo20=Gv z4^zg3&qfUEKnaxQTBB(<_Wj*>>-dHVc+8hOJ>YKo4g4IbNXIzCcZdK7ZK z!?||?ab>99$05>ochX9$_MLp86f5e2`ht80Y;DtPw$36nz<8FQbK-Q3-e*R-g3!F( z#WD3tON*ypPh}uX1Pb2y0Ckr%(rGqe>vHZnS%Ie;c-c)6QgqCg0}XuDQhK-cv{Z7} z6Y=-nr7Z`QY)1+Q0X+ac1AW`!(fe@{l-k$i3zkN?mr=2l@DOJ@uhqZKmYafAN>?yj z^?4Nl!?;5(zJxLUQjvnwbRMmkH!mb`;n-;9Z#VsYdxd#B+;Rr4Y!j4q3DZ}O`!SsK zzHxiQdlB4qqnP4;30)AKCcY`Y6hz3}fd;ASc2wIOAGG)T@g3_K0 z(j)@)6R6{@JJp!T(`n$u^@l)A`31oM_zUxFM{MaHi=i)Ev=Sd*Rc-Bf-TvXzd`9Fs zS|unTQW8`*3~l?d$V^ijD%^JhHtK1eIMo-yGC4z75wJX8k*56Eu2<{4FcbK*+{rF* zZ|nxiNRgg<80sgv9Q}N^RoTeb;BmF33!x2FQ3m7}CVNb^k;WT|u5~Is9)~kzWBol$ zLmKh1cgqD zFb<#;5AEpnx~v5%u=W}_K?j?1S>@alsoK6&Y^<$Le(Ncn^XEf{+k(kTqaCd@hU)l_ zE~_eu&m9&=s)vhkRCtDqOAe~%`F8H@?WhR!Pov=t_lmM7tI;d6eyot=E{`2e=pPD- z1#o*G`)ZR)D%vvW$P&3TBqWHJ%NRCMts_%v@qzfe%y#u${34f1(>uezHk({b8zibP zTUt-bALK8d5hfwG`|xFZuTu7`m)URrw1H35sN1SHX~`hB83^yz`tW;G8Pf3I8)88Q zlZqctRF?9$0*K;uKTt^(uD$7TRcnomcksInzs&x!!A|uD;QpzSnI~tKSgSWnD5%yq z4~`z}&+l-7OiK8<`KswOs|LRu?AIU4i&W+;>muu^ENS^D0 z{#hP?TliZqF>ySE_QuOVGY{3n<-xgmPj+Y_nSUU~=TxfHiL4RqV!*sjDB?H{V>cOx 
z53}M?_962+VwMHjZPEFb7n8pp#ZzWsp3QJW$Vr-F8^xH46~8;YQ8u?ZRn|E!c_Xd4 zN-OU=BtF;Mw>feCpI8ZD_f^frZn<5pU2@LqzVc$0#Dn1%XH@{S6a~s*D)STBs zO&YnCQJ$V>?2&dPGEah@S{R=61z-&zD%(EGHQsLnJNyWe(-%N4EEG2Iq69ZbFw8S2((z-M}4Qq2uIxo!WeH zj;s^>tzxtUAf?Rh>?4Z-FfpQpms`7u*oroV|cE%w!y z!S zegP9UUWtinuRTLK=_Bl|ZY_F09;h5iiuqA%p9Yn2h(aK zMyaLV2~*)dN0=PlB+geV{do70>^G`dA9Gge^N*dl-=w?qW0)3UF9b^`VLPT?`3Hwq z1tDLOAa3Cgvxm1XeK$j!v%_Yg+*l>@z4z>@b8kA7(ZoOBMd3T89SaTvQ>72OYYh%|zi)t}M6p#1A6zLU>P>B2v$dComL^BZ>4!l(I5MN8z1JihkgJS_f9uWHVaKV7NI|GUA;t*y0(wnsXP z(+>%a(6pnxeE(PB(|)EVkh2aAqP57i(^R-zu$R^y3k|7BUk!QkX2VNbT_hI zoI1$J-s{6$LHZH=m;DrV-Ea-bTerG^zoTn3my}hB9d8!gz6Q&H)|wy6FcDjJe44AhDQtyxRYM7ThUAXbntqRhjZhz4H#xuk*H|*=zku_ zM7hy_Ox*p>w}K_TbnE zS9rA*AoJK|-+q?v|A^na*Qio@ii8{e1bP~zb$fNDEHN;dgdz;uH z6{$*AVq&m$ipuL zbc(#IQU60ZZapv>tb{uSh+mq@`R9sFWU8p;{Fe1I{>J>GQ+M8|dc0xQ_-^3R?`9fL zL`?ARfy#f%vPiOz1sbExjxBZlp_6^%Ma)R%$_tANle5F9bnxQ{b{Eb8qR&A{WhxgIh}-iU^+d{<;5ZcXiRSPk-ayReo!FicgU^^cFTnXeoM)f7V|r z@L~Ng@5ky|zW9opMWtK}`{s|3JY+g08{>EL-<<`Xr_zLz)UP1UfwT!80lyXbzqwL* zC}USu#hF(IL>V_da=3{I8qOV!O2F6Hmy3>V>Ts6JHwnZTEpYK%>(bIr%|brcLGZ`{ z-v#T5QoN1k2Of?vc}t{1#jVV-@fEHIY^ZQHIy-ilDt{D5JRHYXNn`OkfQ_$@j^00v z#`+ek%mk`aU}ce2AHMb##GRu&X165{BRs9CCmr#{!k&&1ppa1p&G%t~>Ofi3(dYMj zO4rINqAF=`FK8z@aG@*SHO5z%^7t=7aL%C2 zrK-nZd%vj-I;H+MJuQLruQ}Cf?vfKK!ntdczS24FD520n)%5|Q%%pGc`{teTFfnzH zhfj}1rw#?`uwC+^h)|;&A-u|BrDtE40*pY~9wXmdL`_fl3MJ`+rnDjsZ+*qmH*IRY zNrrH+(9phLC5V@pqnelI_i!GENN=MD^LSGbJH~Vz*YGkgPL>xq3wZv?7yP1Fv8YBw zf)h+G$p&G?)uAHCB;^tOU~0D zJ&ZT(zD?-!Y0EPn$~{hBL$L3HdOB=`KqD}p4FJ>MGJoSEVn!N7HLG;`!V_e(rdxb@ zik&0KA;@*DJYy~Ql+q)R#R2fswi{%5?H4+e1Gt2m-g!)9d9ACj&+|QBb%=PckIAF! z-PG-*)_9~H_NB0Dtso6cV3*N7a!<}Hdhvx~=J(^sC7sJD)pYtIKk2J0(ptsOk{wcr zIbG1BoiA5Xxc_Xiv(Qued!GQvW#l*YeH=j}r^u=L#KkCWDiXqpIMc#*eGw>{Toe+? zm`2Sw%`|!)3eVm1nosuhRumVxp2c30w|JaVycPf^hwZ0#QzTgr8=iEP(ye)1i$nK6 z%_fQeSB}4EnoTeDZ{48xzbg&@)#v}A17L>!f6EMGF&Yg><$p)LCYSzymlXatq5daL zO%zJtk&=v7SWbA4RE;h zneN(Y&5Cpo2<+U}>&G?&eo6);uU8s!E-O>bJ;T`4-78`X*Hhx*7t$=wSJhdlfB+{9 zFsSob;@)&ABeL9Ti*mE#R#(6bNga#Yuu$gQ@ZesJx?TosR04&wvgpwNS z8r0?cs86CqAkEA6=@_b>FOMGQ`JI781|n5@CSz5sHVa$v>Go8+GIbB4T)pi@i0I6; zBvN!{W1!gmcZBwWF@AReR@Y+=v{-z8)6G?t;LdE8In_td=c)=t%giIEUR{*Y_(&sK zbfa63Pg6rq{8D`ZyfSXr?vX0?)`&rZl(#vLj)taCK@Nlmo!B4}VX+6t zqK6Kophcz$a|0?LY?e-9y`EeCP3CxRbHA!IS9X93Cz(*zGh&p!{8>I!`QT90t#x0M zoM=qWoEAQ(kCnN??j=tBGB8L{o;RC$)-)$WopFaBk_I~KoO~1#q8>bae@GlI>OMW1 z&chQ?-8uh`@aTO_wTCypDSulFyAkzn>MREfbK8nvZn-FQVq`QXeVW%@$wMz(fP75^ zchUZD0`GuHJ*$wHL8`=_{bqox!WZ}=sOU~_{nHhP>SZSv7z5(CH=}m4GqA#)1N1Pi zlfgjr);1Zp=GCwrT%zO^wPlQ)Q2sIxEc3DD^u1x-=e}ML>b923i&E2S5FdsSLr&I? 
zFIy}wpDjusc2E^Px|&tJbNhkdKUqg=!-EzPZg+l-RTkB6`9|CFUWVI0qp*3gA8%ov z$oPWvTEAK_J0gd0O(}}ONH=jZP|T=9<#FAy69+Q+I#`nTBXV-Kv8UT$*f@K+#d$yk z-v4MmSFosjV$DiL8I2x3n1V)($?ZPXLPGUgXNL}#`7DOMCRP3{kJfzpu@_N|Hb}#n zf`!W@xr;|Awxe+U1tp8hmdP*I=2&B4YWVpZilS%^a%_ppRfkMgMd(B6Y4NG; zigt#?H^{G@>s`4#|W2)1_c2zP*x^U%M*Gp_9w1ukez=d)UH_r8)TLVREiE%4>s(gMgt1^R@A%2u+iTk59As z>t_~BQN{A*9N;SAH* z`f4i>a_|Z$hs%6H%oWzriZP)r*JlYDxmwR8^qkBA;WCJ{1%e}zHA6be(p5Pu+OLC{ zl5^AERbW3$T~uwl*C(q=m)J;L(guH8Nbhaa%TZQ14V|werUmgB?Zra$3|DpUubbJ> zagwt(k&@Rzg$mf?BPP=$K(V4QkBK%Y5A^|(P_xcnz~(3xw13)?aHf& z;)VEk7q~?|6n@n-YL%ul+XCg=Qk%BdCDgl681|KG#5o)}P$iJ?@R!MWJ`8Topu`=4 z+_-I5s_w5zAMUht3(JnTgMVK z*$o!kSlL(!e_fu%f-iGdcEXdtkPgsP)>pBjT3ceGfyGx zzpzi)>OKh1CG%bC;t@XJY4$NHZ$0>I!G%KpcSjVrnyH*PwVbNrQLg}gFN4~f@L2Lb zw(tQ|qi#7It*qIPnEgsrKAXDS>NgEM^&Zkj=(Rp)F?tX+PNVa!%m_SC&BBD@llST` zIjl03)cNnD!sgS4AM=6&H^-Rq)B8R}MchfCN{*dpzWb^0XC($gai!0m6xk>(IPrWT zV^%h|w()x;mqeSu`kT8XmRL-$IIoem5vydTOr#o^em%jUAYq zw{>wJAeY)u556oc3k%LgFAsW+Xa9YYyF zulK{HsKyQnyH+hdB#Qzl77C*x?qgTv^vr6aB6_){3K8^A z_U?z@96H0jKvrkeuj%M}EPt5_=q%AWTE2X38LtaoIDQPvfmC29FDym=(W`O%v%meC zdf*lEU6^+H;-&TqHcpW=VAT&;<@ueX(q=TIbvmSAIsB_+gAGsV$AjNxRNY4!_9JgW zhuV`ar2)-Mw@umZ#z{TD^$q}2&SGlnpY+~(sbVW4BR0(I<^xBlkF~$u$0Ma6U^B!6 z9jTR$igsSpWhW$ci8iww_NyW1ftPbUJKvvNa({

97XMIbP~1kqc!tzPi#%F}-+B z0zsGw&MN|SElEv8$P7y}K*jLPL2p2HzGMx-_ZF|=xcgvkMLT=&SFZBepDMJ>*MWW6 z3+g>>6IAiOk5`^E`!7Nsb<%Z}-wy&!*$v&|=DP}B!1ol{l6bu4#d+<&+gkqF&jLJK zn}kL)Vm^<67?;kwY)p0bMU97+2Nhphn{X)9_*O6CME?_7;nz@Ujs=t8lh&E;KGBv6Ouz@1&^bC8w zY0e)O%4sq>Z|vb2I)@Hz-2a_@D$)N7evZHYKgdY_dm8cgYvnRhd9@83Xpcn#IDM zd91&&_dj@*q@(o;pg3}!G##+WYYNo9hg#g%xmHrmX?i*Mf7&SU$>r0{Wi%T1pO0iZ0FtX?(L#IIS`@5O0?m<@cef>n zM;%$Y{?a!o(?8>J(go*Y?t+@mzd2Su7 zo92!Z8%@%I?E5W#H=^`uId+;=K~G#b75IcNT-8S;t2P+MJQGeLS1^tS%kao{{C~bY z^j{g8KY7ri=idh=+5fQ98Cv>vVd#zD#pw|`Je5P~EW#^C*}K)9KI~t)k9);yEBXXc zE13zWJetR&CLsD}-BKWic~_jwuI%L-4|A~FK=GQ?3N^(@hzIdJ(rHD@R4l2O1AzGe z?N2y8y|7TH6huJ5BPTDw9j+FUIPf<06K%<@UZMSMrn$fhPi4uKPaS9L#m`;VC~vg^ z2T|uC-3h zy$>y+Wii`%*ePIf&)m!(1tv4E0j9HVaHMJkzLR=JXSsyYB5rHh)!CxXqgJCH-A_D9 zL}QTOwO;n1Qi*up1L^`ATt7fD<2y$Z_HQ(dW7l?EtSublqIQZMN z=RfyLQwGEeM)U?zC-Rt#!YrQ{<;ND@ZcX=C14AajuM zKY$rK4h@xvs=Fu@p-|2K2v5vj?h4B;g|5lYS(_-|-J1Xge!-)G{IZ9`RoOtlgTY=<&mKBk!MXTIUo65#}D> zJQFc>zd2V{%*;fYq#k&@Bpa3Lf|WKrQiLyqhd@>`mkd<$g6#%miD`rJ%L2Xj zHQg>Ai3H6OcLHX&@@k#j2L|LxL`0``srF*DHR>8OR;gaqH)vTc2Q*J9dBxDLIBUlh z)w{H96z1fe@Sp4=ISP)^qta@*|@%B#51$oRmA1U@2eZy zc{pxT({qDO3Gk%jzfrkLeHmBbvk{M3!>Zk)68=wvj)-_PV3q`y=5AG`YXP>228VMl zzjhDkIK95-tea4Ybn9|6s(TlQHWm-oGRIk720GKJrxGsYN3%D+Z#NSgA&XZ(8(}Rz z_df%`gz^YHatCmX*I^^ZLU~1f_USx$g6MxSdVr#rk7i=_r$mEMPiS4%ksDTKe8!fD zxY@M|bQWIcx2OZU1S$6)U)@+tGsF`C?y4GXp_i2pu!`eV6*!I!>#bqla$Sb?q{r<3 zz{51yA)tSywh#X%F88H@wb7viCG0i)ZbxmeI9Lc6E~4cvn6*C;5SIzY#`n&Z1eCUQ z^u1VQ%?`~7I%I;y+B%9QX9pQJ>Tl|NlEZ=yXsWwRr6}mLhf3)a>adr)AB1xn_4%$K zKT#-c>7wL%eau=3Vu0M1hC-v1JK}dT`A^bWa|5Fg3a7H51|DW#4bQ!e8D@VvZDZsb z+#!AqlM`rf_-%NmxNUe0BEK5eiImc%C9nJ z2ioZmxRnMxL`Fws586;DFOQ8z-Pzucep@K0RqW}ST055Hd3|#C`kK{=H(5B(0Fm_@Fi&M#)B1*W!Nu zzQ+KN~&-;nXn{>W6flFCLyLaz-O!(yVKvA zup=XVzUTe56Gx0^^{)2fhR||X8PUUef;cUhJv)Vw5VtHp@h7#;-^@c7fLSSvle;TY zxie2u&xChSXpq)!U+F1)qNJ|Q(-+5;JFvt)2$&0Fo%i;K4_u{E?V*vmyi4Cjq%=?BT+3sjAYUi|g5mu3^BOJ#y;o|Yq zp|Vq*K;ONQUX+>guqiCz&lm8gMYgd#p^RuYRzpy1v!Ym+b0HC zyT!3TvOz6L;v(H=Geeai8Nyc4OZAFFT9g}Rx}@&uEOcaS8v2E}gx4FPnX4|zlQQe< z^hr7pbn{RNJfSm=U(UTs@%3n@4hHbd7EKM1rarv2V z_dV`)u1^kyRz~CI%Ki*2;={(?z`=mC(Y^Qn?Nn=6771!**M5#lUIv4;j46!3tTs$z zo`NDB0%IU;j`GGSm!NhY#>RSYY<5Swp(IHvq6G~KkfzyrV9W9KHCZuAw(D*U9{(<& zG5&p+C>*ayt&Xf|Z|Y5)%F}@C4AsrXVj%_eA&PEB9&FL4AHSfe`8ylitEU}l-r4VcEorylG%%S}J8%Bl_{8?f)#EN;e z-TKVoZsck@ICY&`9=)#dMZTZqR(YB~==q@=JWun^cd=u?a(t(BZzo_vMT3~La@;vn zpWgCOr#dNxB@?MyV#^E|wV%Rs7s>Nc3lM7u@&6jEO0wYnbCN&P?;0GOunb)wJ6g=S zKWP%8zLDF<75M@AEwv+uvD+2T@GB;kh4iC-Z?lOe}mEy+^x9(>wP7 zeebt@CfY(&!f3Rg)>sLw_??V$!X%Y^C-hZuCPpI@zAfZFn}~&RUK;J&MaC20CZ17! 
zjdNYM&&;4dVzfB?Xr~th+!+)xPUElexRIzlgtSPArs$m>ir&5$VS%Wdj?>EtMqj@f8~6tSH{3xMF_VB4QcGZ{`m%r9Zx3 z;3pR5sGPGe7C$wn`eiBmN~7?)TooZwM3{8&R$I6fNhf%pXqU_jHpJNGk~Qqh5`r6W zzY{40Qk?TjpMDsiNOWwnj05UXcMdpfvIZHRgL`4mL@Jq6u{=91o@U>p^Js?M5aj3Y z*}#_nba#HwAgyRUO8V9xp_z-WY~Ose2h{ zSUOT47as-~6~jjvZ}$l{spGAjd7U)Lirz#( z92__M8~Gu~KtI5c&;5fL4Y!Ay?V!aARCd?Afv0fa2X7fx!0yca@DFn zE#9Y~0|r`ir;2h9QZ}0p@AE^q-0@E_jkG8SSuXVI?!I?6Xs7K(S`*Z50rZSC!*Ec@ z`-UB69@dWrd27by6)XJQ!+PfaHBVXEb(W11W64{h-uJ_Qz6NP~4;itiWRLbvhLcFq z8a+8Wc@d18)(1&K$E_znrJy3h~M4S{mm8HkYn# zA4AY6je3+?f&x&}=q}Rv)vjvUC><0J=hf*vGLi3px7ad@aL73+u1Hz?2!~Vd zG#ooFqAuEJ4v}JvT|oZYym|?uzQ8s(FiFMxMl;&gQK(R2O%t5tQ9flp+jk%G`s3}% zZJssS=c8fLAF7<<`P$wi6evZ~gnl(ex&$uB^KFpN{4TE)?Vs*2+Zpp>^$qOp-+X^A zat7m#T-eX;zvxMpb?8ud*a*?RA(l;B6@0h%UQCN5ZCubZ>T^$P%*e^-KA?0xrr36~ zs}ya9OD{~Y+Ns@I%&*#hc)ZDjGxYN(XVc?*@n7sia!by!UNgtMLIf{sOPCE_{`|mA z6OZv9dS*CqWBuZ-X2F~~ipQ=*!rL!{nChe+AKno8dlV<~I!Ywa3@FYS_-AfM<=lic zj~Hm+(!MP$s;)-E_FUcWPFHWZ;`xkYxL{v(V0=BZUs8%n5o46C05dE2b73S+-?V-V z%r}ksy%_4-+ylrP*ZkYSI^|RWTJ_SnhdGqi~-36i5%R({Qj0y(9KRk!wV^Hyeph5Hz(|D`? zsWE%TdeC*!p*?jut9aN8_sZ*wt?=@oUQ+Owf*+53QeAnx&j}>XtP5%t zb};8QY~0eB%vYJ|kwt!%b&Qpfe+MCGV)OZ+@k(Y8Gx9d(mmZAMdsD&cwXU~_IL20J zX~nmde0;k}bKjSwBI5S>7F0aj?06KY1K06XgEKI(YF(d$@Y&q`wTDA{1+qCY{oJFJ z3X6rC1`hn+Slm#;^J7yTuHW5O$+pJr$1S5HF>3+1lp7O{+8p|{kUlB9aX;A^zAu@F=>zTertyo-P?C`ztf~GE#@G* zb`L_M)$}nEyurr~8;^o7QJndN)mydJ=@q=~N1IO(M)Kog(g9d;>oBh3JSB9gf>Oc1g{W)m) z#zMJwO(CSY>U;1&yob2(Ii9td`4%tH?*q+5-{|tw-4a(P{Wh&LUf(I5y6QzP5<%cL zf^^9d)JBx&f+JRUsfqy1g~-P7+q}4kaL*?L9Bsg)R}<51_ZG2nGM+t~Cj3`G)yAB( zM4I-e_=q@T0{1eEqp{mrLRb&~;^kg1|A92kb!@)*=M_@E>HH(GyuQ%Lv6HezR<{fx z94QOGS#v%`sv4Hi=9+Q~zAmBiH5tBMV?g%qPtT-6>Rm3iduD@=)m~YmlNKguZvIdy z`IPL^Bg}zpf3Wh$fRjK6n=7m0N29^DX^(wln^IVoeac4nH7DKYd`m1x#JO$vRkFzk zpJO@FU%0(3?LVksEgv6g$S7KK+m9xaoPL zs^XF@zcZ!dc}aerDT2$XQ3E*3DC_q!!HK4YKIe^m!j4BMLh+fnm3=Kr9+=c$4S z@b7w}O0rK}|5m{N=bEGcg|7WS6&#J}a4^yYCGQqZCpTe5jOB?|!~$yT>s3Uinc9^% zqa+vjB1fG%_D`C!Pups5HAIXrR^&+LDoZZA1KYuQidWtUgx7J`{&PgmHK6F~Q>(jC z&Hi;~pwnRK`|-?zn&=!Z7Im)cj?P#`>5jL~##A-ks;TqVL<&DvXs2DF@w(GZ)%mO2 zwFy_bEgPF^hRk)izCB&j^Z()EaV{Du}}*x+(B}&n;mb(g)p8WT1E=vytQ9|oE5FYq5QzKc#o?Id`P!RF8^hL znEt+EU#Ds}4DLAcV&qNfgRBPqJjWuGpiu<(0#i=fTOcW35b%YD#LZ)5T}E77yjb(s z%&0t-cKJSD-zO~**q_$u(F~DXzcHi9v_Q|tP2V7RIxO=l&N)h4 zdAhE5ANI0cf$!ZJg}v|MFH0uUB{O!DOOKm6v$52j&{9>SQv3p8n;vPw8A^j3BWEpC zAIaulB{~IKhK?aN&b=KME{_CZ0t$2#Joa$!%2tZn!tsL1>QGm9n*u@faeV;<>2`boYJCi)sY_mK=$M`2=^k`Gc z4S5&d!-NUrS#%&l*e`WIoJ2NUe7U=_vAE%J%{#@qM24HRPmaGO=U+RvstzKgL-M=x z8cpNhrU?JIvdLKemc$?qDvGi__?X0lI`m1f`92DfCg!c{_ay@530J=S;vO+@!hUzme?hU?T0;||T;r3}i(FTuw1 zYI`-lMidFt>Vbr=-kbi9Edq<_yHf7w*GTk68e-D+LD?6sL6+)5m z?i~AE*Q1}}9ssY|s;^4*K;YOEHUU=r0v{?-ov{WpgUFIrz=#iwlE2iH%hpE+@26+f zT6{s~<>B5U)Cm|oH^`gVuLwe8%l1*&V~s-J?dGx3V??{l;$GEJCJc!KX7@<$>Kh_o z#@O1(e!wMoiE156ijQ7U_&rG|ZBtBoI2#cF*TsHw(?jH>L5S4a7@jEDw$9GNps~A4 z`q{YuvTEj<*(YiF7^$`w%Qh}`r?@7&Hs5%*{3_q?{MKT{$cXJM4a^MwxMAbm;Iv1C z*u%M-{}t-?FR#=cf7yFG-fqFCQrHyj9&g0M*;vkobp<-v^ayuy*?5ArquN?4qO*b3 zFz6q-62apq%1Ca&pqzjx;32!qb`PMA?BwM#Ea9##(1v6m@K~@we~uGuJw8L^00SIGjS)P|4uO4`g@p1 z>8~epwwJoJd^SjR1zTsY_U}_Ml=cZw@!h`nv0E^?gU7~A_j=)xZjpV9gmsAb>NOh+ zRtG8fc&{wCna{Z_<$raXaMvQObqLTwGj5i=RGR7K`I}>VCQ>IQwcUbmMA#!h(=#g+ zo0O9!mQ6=M6*LUkxD7@W2Z60Mtx@s^O-}Fat5UBa-GIg4X`@fp8~UuOIYL{b`*H6% zKK6Tl2#HO57f^FqSnirEq~ar1U80zG7F%dlZ7{W|QA~UdJHNt@X)h7_lU0_{ymF!I z?&e6?AxzNU-S((hP+`pAjl4zLNJ+Xq45<#7%wmBaMhC_!r*(u`E1={3a%w`fnut zw;PWy-mfuE6YFmkKC@VZd7ljK zPJ2&V>qF4I-AT?&flet*OcjwghmE2Vpjhx_@uOHK^0GM19q-w``=9lsY2G=&frV|u 
z2Vc>taGYt95W!9q%7jgJsO*GR+=(?EN8S)Yk|I~1MZ|p{$>-(}tl7_RNp4srVk5gu8EM8olR__my9AjNZgiyb$Fdt)O*wn|n#i!CQA} zrev(hbB21#qWMWVo&Iv$j)gtn`{>U(hh+e_?W}#)#*T*$-dDCC4QZO1f|ZYaFXSrz zs#f-Kpy7Dpp^9R3N)2>`0X_=0zB-ZYC$9V0;aY-2_Gs`Y(DM(mr7WLPP#ud4@^dCF zqjS6sZvo{h0$W+;v&VN0o2vyDo z+w74Dzv+1QH09r?;8ve$8FhXf@+e%dPL=XVQnz;(&aF_*olnS$yqM7Y>`QaYcXKEE z{;2biKr}V^o6VM?cWu0_f{h^cMW|?6`6xfBZ@M=!0k^J_(tyW=Ls5ZWfy4N&-67WY zQ-N!J1#`n7&Mtj2VZ#+=uNAK2W!8Qq`YkM`JED7Mxb4XnXYGjU{p}kUK8R#`KM!&O zku2lHa(1`WM7g#t)~oZ38DO)kVdK`zi_Q0cBbh)WZlXg~$7XY3c(ouUT>2m&c*Gj1 z^nzQN_S+IK_>nd)WR3td5y zTA)f%TFoCSgLzwBDR<_D_HhCH2}3YH#euS9{agQjN0wH_y^OGU-Piun>JKAp?6qBG zQiK&rT~%t;2FYmRcNGZm6;5}NTHh`xNYm1bre_p#uVh$wR?;p;L4xoqM<<%+&`N(A z&ncV_Xe9;VpH;vY;Gx+5(!cNXIh_{pvz6!wrh!6rZAl&0EtPy|qB<&M%<%UK1)|;? zeuENd^6BA=zvU$;Waf9jsF2!WqUHqb7BPQIk$J^RVRtp}Q7^?$KXWer8AF0Ts86Kx zXDrbq-LQS=9zmh&D=WkIZdIbf?BiH}Zh2Kg_k6Uep!U4<6*mXp*4ZK8Hw1@Egm1Hz zn?QPElfMI!lg9O)VPbAv=JI-C%zB83V8x6B4cu$EEp2~I?Ju8Umd^K2&X<4hr8GdL zqhl0t4Y;!9SQiLH$TaWM5I&8pr^L-0`m^sF+KuOf&(oP(@27I0t2=#eQWry^)N02s zFf3sa7@E&sVncDO6Xe}e@|J;kK53?YZqRR@;G$y5aRctM1DfkMN_dd#(I5p6u zk|>1#w(%eLA1UdYo|93KU&Wcjwsl`fV9pri3;~t>#uH+nny1Vw(a@h zc0>-MP`Kiy_+iBhvKZrA1;_tuhfCXC=N&$$e63`{u|0KxWo--3ciXWUzukM}v~}kj zZOL70ejPRqKRJKbc8zsV@+j;;#99M)!u@ z@2m3?53?l!25n9=xmaj|iA z{f;G8Pi$GF4zn2mQ>f~!wjJ@ac$3#$?c~#4Z{kvw;Bw(_w2`&#l6{5O6)$#n$Qu|f z-(PLWEG5ib$I1Owcg@rczoYB9Cy0af-xKn5F8|rN-|*{YZQhCZl-EaUnA?U|FJ{dC zs*~`;gvli$!BhXxj#KY=i~YqNm5%Yv-cqG$amLCab-dwJKI?wNf1NT$s zJ{BX4=^3|^E&TBTVEqz$=^WKZhh4t@*G8!NmWbH0SzB(J-2D8DcTqoo@$^ET*nb5fZC`V9^4IEt3nzmO{yyTz zY#q%P7@k_5Ue|n}wIum-+R|I+4?NtR`udmC)yRe|`Gwc~Zr=Ztw0zN9yYMv~&$n@_ z?@?Q9ud{9a*&Dy-Mc=p{b-iX~N!8m$JJw%yj4hs}DVNqd=@6&Z_Cp7LpFOqj_naSJ zA5`huWzDl+f3)*#gXf;_#~*L4w<*y7y-7qScBoXE;9>2fv+h+Vq{3%CfsEmrjJeH?QM7Jy$*;|8Rf9j%LQV zZcx|U{A`UZa43xBY3-AvAXB$p0`|3yXI~O{dOupNnyt;4yr+s@Z-oCB}dETB5ga}+t)<{*Tg-XMa2#kHc4mg8;OrQU%+O*aw+D4K2$cqyV zYP_JMJgDcsP%s9AVm$KFN~B|DhJ^9y9e?T*Pi0sxbmZ><9{0oG>FVdQ&MBb@0Mxzr AEC2ui literal 0 HcmV?d00001 diff --git a/tutorials/text_processing/images/thutmose_tagger_alignment_top.png b/tutorials/text_processing/images/thutmose_tagger_alignment_top.png new file mode 100644 index 0000000000000000000000000000000000000000..dcdad57c4e0854d922a320266beffc4d56fa5961 GIT binary patch literal 13009 zcmbumXH*ki1GXDPic%Gk4k{u|danTi>AiQA4$^xIA_CH-MtUz&rFSCIi}WHj^w1I@ z^bp_+&-1?L-&tp!_xq9TnXJjIy=P|cec#uk7oJ%YL z0A6cFS!o>~Gx!o-0J-k6(BUmpT~A=PryMIm`L{fN@;92#9^@|Q=@%&QkUqQDGFsM3 zfUgk$;n%OS3xpDnRMM+JhQ<0X8uuRLczi2j{rOPg(dlK|Ra4!|gX`>6zErW)=JSz1 zj*1|2;PKIj3Su#p&t?Rzp4zFoCX1;N0I-s5zk7>yxAltvKj)u~k5T}lyC2?R-?P0d z0W_pi*mvdMPXOw>@)z#?yM_Y*g3mtPwH@#i3yc4*{6zG>pCb1S?pStlkCvrn!S}~n zBqhpav5!$%dWCgGX9Es9X)V!9;^OKB17diQ(@ZPs6oPSuPybj?MkGA;RP51>M=FhP z{z-a5rdsu}seto{9!%bm!Dl`ZejNT996f9OENM*Bul+JmvQU|O0I5_I!*#`e3<$dI z=wCe<_YNLy%tFTMitIb{av?42f#f@W#Dc9>Qoe)fNaU6{?z+iPjog-4E%c$4XlT?! 
z3aMjN`@^;D?M=^19RmY}R%UOF*-ur%sv(}*pAWZKSxR2}$}K-b8)7+6dB?LP&O|qB zo36D>?W;Grj4ko=U*9KxBI?{EP*$eg-e<>GwkIn}E|g9zJbAwN`+Vm|*kDB&Z*jT~ zpI2MIzR2-ai85Q|J6+Lx6*W*kx~VWT;IY6aJYC)O5qVI~o178-P~}wTV@Qq>6Jv?I ze|XFNmxf~Brs84&A)Ja zPX3f!dvQHNN8j`*VaHach7_ni?3j{rlNHP!rQ}}kN)+Ny59V^5XhEHr5}@8Q^rn3U z(W!6#prdJxiWYFP@GiP>ZgvQR#er$ZThvcD*xi8nFT^J5;J`9vW#ji7f`3|!hhxpX zaC+i~3Kr_p&35t%jIC_{#P_{oPQ9sFVD)sePxT!Bo}%X~?bPRUvvKZ|aH)RdI3IRu zwES9~KGE0oHn15Qfba#DBja1!e*4O9RWNv|77@2V;`^+V^%plCmN^d&(u5c$X3{q` z8P^3*XKPJneMgMo&L^|=HzwQXav(0Y3BwC$n<@-U>~UGydcuPsQQvIW6Ni)34(h)} zQiJzmt{h+)F7$~*z7H)^SDb(qK8a2U%F$XOU-^WckFUd18)1X}-eyM9X+sBTO9X@N9c@}#(iu?xq5{KOYff#I+V@PxoKNlcd_f+& z;Y?65*m?V0LxJz4A(Q!2iYQMc-(=PLsGC?1lfJTEtiA4pQ&SC*`J!;rt24(=kQKzq zd|n3`;E^Oa3Sn+^MoZjq9t?fmn41R;1s+=iV<1dBakJ4k-fSUj35-mW2XU+sK^Bmf zf+Q8jRC>3Y%~NQyqW-#Fp6+Ac|(vESn@6VmHj_b#xvBOAKOU^=lFiMXI0=gj*098!9hzrlhbTm$fak2+BpGK1>4qgDKO`GoxFuK4ccZ)3eH#&a3^ z)*GSIofne^5U!3avUO)v)SR!S-j%QJwg0mEb%8pgI&A#mZINNe7~hZ>TB6MD?{}Mk z_K}L%0X8naTC}~2^Y!^AbjbI*V;4B&?ml3+MHHuXGtU#&Npeu62y&&13`xpJFKs48h{Bxv$ckz#~ok} zZzZ6+St4{5qJJ}U+PL9$x)5*o!Rz|Mt)orzy;08TI)N{KpF9o@r^PDeEVt1K?X#9k z%SKi9K7sj5JK`mm*Tuzod9v`3K8@NBHmWGQ;4`+IbA?b!Pj>E*)$PCmkpK8Rae z_qrQ_d9z`Qc^~|2a(vmbj?YED2{>1(Qkb&jTsLN3wg%S()&^`;i92nmc5FF#=|E7s z8Mlkm0?A=s_+~ss&eObNeK)-&CE4~l;A}*Q?0@Dy+yh}cJamu+>lm(EU3Rol?Y2#nt)!)QTyf0>T6e3m(=bn z>PfqdrkRt^fn4u8uj>J2Y7dO^Xk6Ej*eoW1<8F#MY*15w^hIzgmPL3(&fd`D!k;{5 zUt{DeCeK2OLt3Pw1sqBjUtdedGL$PRF&NErvGE1S9P2;(MRAKdwU7<2A0++meq;Po z-nU6tEtqoX={QTIsGhgpGZO1bM1SYhaY%(nThkYWLUd7BOTgA>zy^`}WlP>VXSXIc z*fw}EHO69EAop}5eNRr(yxAo%HNA!lCW~8%9)qr=`S%b(7o6UkZ);&07HS+sVz zctabtDnR#6)ygY$y%L@R6(+TY7*2*9pT`u2=~ohaV_o`(YEx^M3Qv=Qdevlb!mQ z4WCwX@Ey8-XWImYY+@%bd$n^vsjj zIEYWTUH#3MS6I>}1b|+oRk1&H+JxzmRDO3Es|AhY?WX;TzqFS}kn?aEe-MT!O=s)? zZO>HfQ^i8F#+~{%fy}&=hnp2ImI)k}SEc5!gov8B!UMUmN3=mBs&G`Ym6oq-sIw+Y zh7&8$7n|z*d@?d*bctpmi%eoJgEM$Pmv2pKUX4j{tE?85JecCpync1+erl4dce&zk zirRqhQbPx>6JrR)FV|J=7Hzv{OAHyMS#(i=G}lIA=zUAaMCQs%*Z0qy#Z8R;w&!U0 zk)fiyjRAc^SxHm$br;hS9)g#0@ZTFynuILXNk7^t+}h)fADdktfv|#8omn;^V&81*zK)Tniu>? 
z5WcY~)gw$yk6!`8W#%-vb@BT=bfH%w-t2-W4ZohQ!@(*;K zke)`5EO<0}3;Q(6DrImOS@&{PX8}4b!QS&d2Kty&8D~@d*9>rf`Ns_W zFD5{|15cETOa1!(omtTE?dk3=$zD9Blro2aO#l1ibfu~B^#*;I%iS^mSC#`!Ed?HP z^~?VgGF4-x?3c4iaB@`m!87I-9#lpa0}U|DWY_huB8YL40IQYWN=AdSPqY`FBAqM8 z!?_yV4QJWm-!7bVUH1AHTIx!CE&__hxkYn)RdYAUYQ=59!-)s<4^JdztUl;&?U*hi zYxH6-kDb}Y)&phM_`!CTZDt|yAQW^=9TiyJ(swjvb%KlPyc9wVDyl;v)^2fA>H+(0 z*P0}Cf~Mxlh~gERSQ$&R`n*w#UdS|T^<6I?h%l#We4Zk4Co#NO3-Lp{?&B1gztGr7 ziuemb{i^bDkV#2d&ye zJCFUA+2!55x$NH0oQDqY8Jh<{rAYtn5EqKvL3DL5% zT>ZSgXKOfgFAfx8oOn0`W26F{cTd#WKGxv578)kXA zg>M&Lrj0pd3{)SQ%lGC(1<@6C@SIekP{(Ka% z``De<9MNy59+7$AIx<}dVwJIT-g1h+ZH^u`Is%?t(L44)8)!u1U+7!n6q3(7V|g5f zzL_`HQBghE#P)D%(tPO|Tk;lmvjcr|Ij||e6UD?jHN4ug_Xwi;wEhHmR+7uf8bu~* zfl{MJ|E@Kv$T+`}1+&<1zkb;j({aV^2uf3){n9b?nc1SV;I^`wd#1|Y|BA!&N6Vk^ z-ivxBIVjGL1Fv`)`Q;3#nQ_$t?x{z>+uP;yl(WXdzOpCtC^)iu=oBsTemQ;qfHIuU zl|6Bv*U!g8G?OEKFA0tm8Nbb%uI80AYp&dDQ=cvbWEo;EDU9GuJF`Rq{IoCs5;m<~ zxsP_VvJQkcy~Vi{`l0G!;c52#z$RD$X!uqRDFu z=Gc1{`zV(02{oToNb7vJ*x$KO>GujYF= zWBL%0VvyVB=$q7V3-Koo9Yd!J4X&)i!vT#}sgxghOH6kJeIP-@scNqFE@YhR!Oh&s zOW})F;{`66En%!(R-m*Lrk2-D%Fai}g~42SZb9f>^|f9XG)Du?qsj6U*!5jYdOTFa_EZQ&vziFhF)8c=Ta&iswPt)iI_ z-1)#%WO(l$sxSJU{qdaUktw}blBj9J?sjXmnkM4AQJwuPQT5yTgjtmW9_YSF-073{ zz+2<_YB#HA93nD%FHHTRaz=s24F-5Cv8}wL(UL0;w9}f`mLnrjJ-sXhO^x(d>|Z6@ zX}dO|kO_4LVfV|GKi}}2a_r5Q1Qb@@)3woETQX~K9Go}P(nFU#X)0qLyRj$`-?Jl~ z?{3fp`sy*~*5xK12le3}X_n||QJd(O!O$ig!bEwPZr24Bm`_eEBjSl~Vv+{-M)!@j zaa&vTxAvYah1U$daf6SnaJIip%kWWvzwwYz>%d|j-7fOq+ZXQQx%%B*DEF1 zUhtiE+jKmmkI>+iU@$*J+{}xrn-++lA(CGfUWIXGSwC8~*~9IUcOb{TRvmjC0*&#w>i3%WvEXLvW!s~clt>7hD(95MmYVK~p^J#3aUefJa?^C76Dq6w z6Okb3M?(Ha&J@RQu0#coWM$+wucueByjfFuyn;g`0}M29WwrW%aChIa&)C^1{*B|3 zQ8Obe+y|0L!!3L-T{HZhcZ}4OlD~hPHAp;WIPY5_W-wPfdbkTMO`b{UB+E2Q1)({4#_XnU!a*WMGF7y8v-rb*^V zWke%G_#%Hfi_8jdrIKg>Ux`l1vahHf&_A8>#`_ZXxZDsY+Iq=5fpA2L;6{r$5l+pQ zciRNS^}e`RD`T?jdcN^wxVU+!$aRj!mUYwMRK4iki-u1YE zhT0#Uu9e6KaDa2~Ey{7VR>;H{V=0G|DfEU9C%IOID)vGMBtcP%J1*f4bv%K|HNPV! 
zTl~?|jtX5BjO;7|t(j+E>}MriIn_&$A&IEQd=tN*2tm%6QSF@XbTV`98c!L_Km5k^ zyLHy@ZVSlKxaH3APD;-?vNLt==QsPg_VqjLIUPEMianrw!E9(Otk`9?oL1eu-H~Kh z-yz0J+$6P&()O?smD!>_nS2o8kk0T)b=2plN?f`n%(;y1THue>$N7<)`RmuDJH;dg zOE}}=)@lA5CnR0rz>e9ow=0B_t}zdi4Bh}$>Supi%(tV)N}HvlR?*+1_9om3__mL@ ztRoO5@yZN2b7IV2#7UEfTmL38hgdX9RC=}dMd>eA5PVQBCg)V~6&ED2@X6VHSvv0U z4Kv)>1;U2SK0q$%A}#b%B+-3KpV6c=+2MK|*<=UuNe8hYMKy@+Him=bW!K*IWRB>v2&I6 z-7%YaGe&l>$>#pZN_q?b%BkJ1giZw0M%h=xa3IGxVj7+B1!o;&QG+*GT(yAdEuiZb z)Ppw>#&j3&6Cn#eJd$?`&!@`Eaj%zGLI*xczJ6`^2MTWrR``YT0 zAC6)67FL(K{j97lUApryKgmU|17OS&b#r9SYiu# z3mK$*4}m(p7N>viGiM#O2b?zGQ2z&0ll@0s-Us|w)c$`G?#^|G{k!S{a-JCd-(zyD z2RU3H>6Oprh>PPo*!95oXJK~LN#FdW0yuN?PyCLrSa^9Stn9}>3TA5D5>YLiH=~n@ zz}+>aYg?m4qDnrt+ygsZB?q{(?;A;6zJ7hu zhp49LVsB!DzC_x-?OQ>EqhfPxTi>JZxFH?zeFo-@l^);^cUfY4nX2r|w1)%)Jvtgq z+16(zGJz9sH66l3A~~Il2CB@8ar}|2&d>mqE3)7l&W&zn#~PU}vSW#E(1qw=7_)26Jf6j=_TTHZCM-0$Sc>*48 zu;b&zuKQt}O`wj(GvpWsXY4EmGrqQ{ErUJ$aa7`Ily%i|80c5}eg)6q0!|_P^ZR%k z-=D?htd>_Z8em)s^L?79pv|*$E~hLdjGxKv` zyuVyIHurjtu>Fj|CA$AhSCh0(F zmP@+bgq5|G3#sh66bP3wNp_GcU|JXK`g=AaVq$AC1?p<(KuCD^VrgKh~Oonay2XDSQ$?XCQ@d47UKUPtmkFxFta?kD0I3S zi42FesFY#To1Tu78)?d8{%V~((^L+3+hx0tz8 zG|R#_na@7ii68Pz&#<5AL~=eE10Rh~X|zjYU0@^0@6$jXj*p$RYKu;%m^#-R0cqh% zDp$J#6}_Y%X%}CoPkn!TT2{r$y0^B)bP}h=XJ?By+ch(5k|ql*2R-BZbG#iraq5`%wjz zd+~PIoR}T{DV3|v-h&mh&tHAQhXOjkO1`LSrr!hkt$wT-56d0E&O($9jRSmz9HE!Ar?v-a;N%m-szCLILGH@u zB<1zLt!NDA=t}%eB;ti^JTn#GGb}O@Y9Bw!Re7oPG`Ztbj<_!xke>VGDX)L(Hq8R= zr>W3QMKWiF0GywhOS8D_Hz3|>rTW*biEqLdpTxY+fcZ@H0fL{P6H{=VO(o?VHa#volg3F1h=q)!IzNnycR&Z2sa z95}rd9!6H#-=Fo#`8aAjqB?H;58AnY-M>L!3|_mh`K^yJE?I!`G(qqA&r;d}rvX*N z@a{HAsq?4B$N7w)AC4i$A4Xn5SU^`8fC-MI$*!99OTpa-+`MG}n*rO<5MV_3ww5`R zmiV|8a$4+LQ%iox-0XS}xEm%GxBEt?V+?_FCt1T?VGpAs(3QO-tE9h z@xjLizkcZY%;OPk+MI>y_kW)_(wJz*FK{;u%F1~a*ewJV)xx-E=+Vj#+#dJ1)2C96 z8VWu={CgeJa+=|7ylFi_PsnkP?5<6;;>Qx1A?sj{H8GHvqnz1}Use#$Ca|uzT&na5 z7Jnn9(^`2J~xeJ^K<|i{|H^W91*}3hv_=1{Fd(!Q-AYMCN8HL6wQ|+^A z9=X2RI{cAACveEoXw+0H_dj4%MO@w&?|5h8aQP%BpW~GNQeCv2HL-zcuVO|v*&`J;{2%sLx-jpFoFk-HohiL4+Ok&IB7H^ED~yCuD;3i2om6lh2gSy6n}Z zNiw=&0XVt8I^Fj`%sn)w<)3Z5KR@czjRd6ijqYNTiKOTSRlBq2(6;3Cam4-MeHwYx zwV2S(a{YW#Z}(jkT;ywI5qm|d$)^hQli-%O52pN>=*N2RLq#*5FN1zxafCDWzbEYF zacEmoR)$XZD!VJ&39qexZ8_i$Y}?i@rK&K&SIuK&IZmvqPcii^XQ!x~Wm7_HWVfCt z8O1N4hJM|gXu?>s=|7JJ$)o+8Q)wh`t=pnf_>&oG!&0KCHi*Lk2YylCKTyKj$n&_ZOWR`$s}npUW-ZSI{`U z8#@k5K4aEXz^kd^sS>pPJb$WH>t)NkgDj%M?+s|K6@bFHd+SR_g6!73&{Rnz(AU<2 zw~#3>*u>8gSaXjXxZt znms12_kIQc!}Q2}WwRRrzeB6649Al>l90b_Q5RaagAVx4cf$omV_W&ANLE`6bIg(X zvrCRoTT&k{CT@$Etq%&ci`B-fr&&)B#}y&;vP0Kn>>X|?9nBKo1vA81&ti&w1NX#c zP4erxUQMb`)=X`RoP>XE<|oC{8teZpVsP0#`{sHko;<0c7Sb2y2KDjM7=JI#;xh|5C(b)6Q^d&}RGprwVI6l3_Nr2wlrgpq#A=Y`Il1bzAL@|@G zbUerr_+l$$o!U`pH2>lY^=@0<#hd4u+mGdl=!iWdbilYA;stQNQExbzJdomv-VZ(7 zL$%ENwc__li38Y&nm%0UY5M!Uymv`GjCn2(UB!JjPGs#1mz~QR;nJhi=!_9l(%PrP z>kz4je<`0B=ZEgX18&oBH ziyCf%Q@Fl59(r$`Whwbby4ij>=c%(>2mm7#%GO~8A!<&?Yon{FS;1 zvJUE;k9O8MV+?#Bf_K(tR18+Rhgw#`P4a zZpGT=v4__v;E@mV2==5Z=cdBQuKgi;q5WKDAv`TS3O8kzFf}poEiFtbMoWo|Sy&U<46&tg*7AQ8WP%^yKrF9Z}84_W* zcJ0ECisC`-2{&OZhAPl7we$JJm2%zDZ}E@M+olTpw;4|iQF^71rWIb>(UMCq@E+G6 zodlpG9?)!GLrfnX2Sf+Nw$p`_Gl#pXfItre>2}~DMTp7o;nCZpbR_2he17L4-lOua z@pU8Lfl<;!cdRTY9Swo!7(QCl{0uBYH$Y=kBCS+dnryg z^2@f1;~l$i>g+FRtw=^lt2W~WtU;}x%i9m15a0g92R1dlw2GoEI+Pkx)XATH=1HVY z-_h!B%~cIq?8zOq&B$HuxaDwFL9JW0Ee(r6NGtB4m&oNtZDoXIpHWXWx|CpjoS=I# z+7I|1f?jI%c940AxOCJ;ebColYI;ArE<~R%R)frSCSQeGc+vZKf_sUw=@qZ-E2@RP z0XGyQR#ZABck?2W!wF@Oz-&%E@UKb{^t?uSo|1ie>}~Us(~0(#MIA;S!aM$6^Isou znG>O8@G(<9spNmA{A;%Nk>-hd^L|x?t0i9#YAW%S%;UmWs4&Nf|3h~wYo`QddSp_g z%^!T7u?f9WX~{ 
zVkh?g988|UF+^udATm81X18a=pRUSWItJnB>NA^;rOM&uO>u`O z&Q1t;W{R1ZQ4h%i9$Y?rBh8?WJQ=|nJ+izDJJQm%`Sdzt@gH0i=+~BDaLFCIDLNHa zz!+H~zjwV@tN*~AfTJMwGVQPl^;$3O_wS$IMF)W@v6E`J2j?Zh%6A#Y57!O1?!jSk z1~6SLteZ~QI|CKBF*cKc_kR9lfD-)ZORNH5suxv zguOk{ln(Cqwh@di^u~*Ip<(>GBI0x}(5!$H{~iQq?9u7fL}XjQyhJlL2Dn7V{!o-7 zw$boo^}*u*hu+AUk~x$pE9?)AosivS6nV`5&6=p%jAHs9_{?&=d4ptUlUPZMf4B83 zv9-9oXAdnv>VhxDT^tIp_CbkM{i@4jF(&WJKqz;$PHTbhHNC-1ZzWV>S^owdiFwQ! z8z_h>o53Sl6tz|^`35S*u2TUU_@0U`_bmze@oFx8g?4}~l1ir3xEstWrivmNx+hFykXuK^cPG3#)@zBmCYY2xyex+ut9JIoE ze|cs8FN2yoDOG8X3!kSsk1WCCCC6E5M_UZS;j|;OMW<_f|0%rqTFDZSxj@Ra~R2D+vJ(RY@ z8wkZ6#@O)lz<(i*o`)WzpkcHKG~U0*@E2>vZq29l=f z`FMYem$}c59h^B9N{kGKOG*hLlPb$QOTcx5@`>BwJ-4aFfjEcEhMimDLb`B2JQglY zvvKAe#I3I_hb}mE(DYKuL@g(I!Zu?ytb5;DUb-~>LvM^et;G^jS7G2lTDkf>3%uKd zQZUqL(>n4Kn@&^C+H-C_8Ra`Lc(b7SWbW5%zRY5DM*38_-Q2TpDh)K4d>Z|O{47CY zm8)oBHa2W=$#!cGG8=B{=UY`->{Is)Zic(Kn-Y2@@HFG;?B>t1>#O0*6Swqu`!F$f zEI&iU#I!^Zoc$BC(zis)6MCtV#ec>TA16%q)n_0iK#e2gen8x3o{sO>74W%2tcSpB zf6$K9P<1)&*kU}TD#Ue>k=~2HRPO_M1}+}nWt(~UafdFrX)UMG;`Ck=9zTt?M}TV7 zO8LH*2qnpucG>b*xQN33dgG;Bp1^l@i`nB^)~iHgqrF23RmL7}LVC4M2|Tw1lGeYS zkUoxqaJR+@y0}zt)fRRAb98eRK+p2qu~{vlI6e{R&L01ZW9&M!4hgeTp=XgKDIxkI zuTFm9)n*18X4rW*L5|Y-PJ##0c)TNLcaC~M;Nvw|^{5*p+M-F=XE5WZjzi4WH78{* zp({SqGS{5a2+m)W97BnNkHVXKr%K|_j8x|;x~TZw{r%%-XakNhy2Unl?#RptRViqe z7Vhzdrt%s8kb%OE4T7AU3IktQfKfG3{`M7J;wNhZfs?k$2+uF*Z&_S2VH*uuG?3|< z&^hgvmi5slX-p#qy)ywe!!d5N_gn$geyF_n`AQ|l6tuv1=`tf<<@Ge!V`2@j zN9KL5qz`dGSV2{uSn;cpCD^|-Ti+JiH=T^!6->YsEl;yRnL00F9Z=sD=&Z| z)SXR|c!gLSv*EAQK-bHq*~SZrEoUAFNmqGjF5_3Xm1GNk2?_cA4iXwN@DS^O3@_R3 z#z66jz!UP+-l_6=3eLNxd}fdo=Qr@ZNMs;xXBU_)OJi%aZ5+;g$&s1z28jIL8EL-Kf~Q$!_9bvXI>M1$y0Ym`KF|q0ZV941k*_!Ha#& ze9HHQ$7%Hd4Sq!#^z<1zEJoVq?bq|~cX_6!kXJvsd$2MCw7=5+)?|M5u+tsjf= zX2A>F>hF&DMOP;k&T1vx12A$uZuky9;CKswn~Tpx-eS_~vn=B%!c`^7{vKy%p03e= z3&vYFbqbOPeA=0he}6EbzaYVw(Kp;k8E&NDf7hHeHvb7y{Vy+1#2o~8{Ii;NCwX#^ orKYV7r~!$(|CiS^%otI(iVN`wh1-JMyFVz(smWHpdH4DM0K=ep^#A|> literal 0 HcmV?d00001 diff --git a/tutorials/text_processing/images/thutmose_tagger_architecture.png b/tutorials/text_processing/images/thutmose_tagger_architecture.png new file mode 100644 index 0000000000000000000000000000000000000000..4729ce51b425c28127ed5edc181191e360215b8c GIT binary patch literal 47623 zcmd42XH?To*FGARA|PPr1_@DV3IZxkh=2%!^bSh3(5uu?69okk0RidKr8nuFh=@p& zP9On-^cIj_LX!W)`+48ztn=Zl^Wl6rtmQKKmC4NBdv>|@HKE#?%FK)$j1UNfS>>Uk zE(Aj734t6NIC&ggscITi1^*p$*Hyj`Dfz{@1isMN$ZN<$AZ3wEM^9! 
[... base85-encoded binary image data omitted ...]

literal 0
HcmV?d00001

diff --git a/tutorials/text_processing/images/thutmose_tagger_final_alignment.png b/tutorials/text_processing/images/thutmose_tagger_final_alignment.png
new file mode 100644
index 0000000000000000000000000000000000000000..dec47fec51436a54ecd2f58039a9693104c774ab
GIT binary patch
literal 18211
[... base85-encoded binary image data omitted ...]

literal 0
HcmV?d00001

diff --git a/tutorials/text_processing/images/thutmose_tagger_tag_vocabulary.png b/tutorials/text_processing/images/thutmose_tagger_tag_vocabulary.png
new file mode 100644
index 0000000000000000000000000000000000000000..5c97a0cb09cef58b6c1996f8199886644d87f5f6
GIT binary patch
literal 6134
[... base85-encoded binary image data omitted ...]

literal 0
HcmV?d00001
z!ZkAXD(yQTV+tZ{%L`v`kJrA^k&O9TlCXyD6N_g_#{p2OP5#7&cTa4?cweQT;tfKd5%^8fB8or@0Nl> X5`O)f{W1gBtH)D+sjXb8XchK Date: Fri, 6 May 2022 08:24:42 -0700 Subject: [PATCH 098/244] cleaned up TN/ ITN doc (#4119) * cleaned up TN/ ITN doc Signed-off-by: Yang Zhang * fix typo Signed-off-by: Yang Zhang * fix image Signed-off-by: Yang Zhang * fix image Signed-off-by: Yang Zhang --- Jenkinsfile | 8 +- docs/source/nlp/text_normalization/intro.rst | 2 - .../wfst/images/deployment_pipeline.png | Bin 0 -> 53260 bytes .../wfst/images/task_overview.png | Bin 0 -> 49787 bytes .../nlp/text_normalization/wfst/intro.rst | 11 +- .../nlp/text_normalization/wfst/wfst_api.rst | 37 - .../wfst/wfst_inverse_text_normalization.rst | 110 - .../wfst/wfst_text_normalization.rst | 220 +- .../wfst/wfst_text_processing_deployment.rst | 90 +- docs/source/starthere/tutorials.rst | 9 +- nemo_text_processing/README.md | 2 +- .../inverse_text_normalization/README.md | 61 +- .../inverse_normalize.py | 29 +- .../inverse_text_normalization/run_predict.py | 78 - .../text_normalization/README.md | 53 +- .../text_normalization/data_loader_utils.py | 34 +- .../text_normalization/en/taggers/range.py | 9 +- .../text_normalization/normalize.py | 46 +- .../text_normalization/run_predict.py | 79 - ... => requirements_nemo_text_processing.txt} | 0 setup.py | 4 +- .../test_cases_address.txt | 3 +- .../test_cases_serial.txt | 3 +- .../test_cases_time.txt | 1 + .../test_cases_whitelist.txt | 3 + .../test_cases_word.txt | 2 + .../Inverse_Text_Normalization.ipynb | 515 - tutorials/text_processing/README.md | 24 - .../Text_(Inverse)_Normalization.ipynb | 444 + .../text_processing/Text_Normalization.ipynb | 395 - tutorials/text_processing/WFST_Tutorial.ipynb | 14242 ++++++++-------- .../text_processing/images/audio_based_tn.png | Bin 0 -> 88329 bytes .../text_processing/images/task_overview.png | Bin 0 -> 49787 bytes 33 files changed, 7754 insertions(+), 8760 deletions(-) create mode 100644 docs/source/nlp/text_normalization/wfst/images/deployment_pipeline.png create mode 100644 docs/source/nlp/text_normalization/wfst/images/task_overview.png delete mode 100755 docs/source/nlp/text_normalization/wfst/wfst_api.rst delete mode 100644 docs/source/nlp/text_normalization/wfst/wfst_inverse_text_normalization.rst delete mode 100644 nemo_text_processing/inverse_text_normalization/run_predict.py delete mode 100644 nemo_text_processing/text_normalization/run_predict.py rename requirements/{requirements_text_processing.txt => requirements_nemo_text_processing.txt} (100%) delete mode 100755 tutorials/text_processing/Inverse_Text_Normalization.ipynb delete mode 100644 tutorials/text_processing/README.md create mode 100755 tutorials/text_processing/Text_(Inverse)_Normalization.ipynb delete mode 100755 tutorials/text_processing/Text_Normalization.ipynb create mode 100644 tutorials/text_processing/images/audio_based_tn.png create mode 100644 tutorials/text_processing/images/task_overview.png diff --git a/Jenkinsfile b/Jenkinsfile index ea84290171e8..e2c73a9f9d04 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -124,12 +124,12 @@ pipeline { parallel { stage('En TN grammars') { steps { - sh 'CUDA_VISIBLE_DEVICES="" python nemo_text_processing/text_normalization/normalize.py "1" --cache_dir /home/TestData/nlp/text_norm/ci/grammars/4-26' + sh 'CUDA_VISIBLE_DEVICES="" python nemo_text_processing/text_normalization/normalize.py --text="1" --cache_dir /home/TestData/nlp/text_norm/ci/grammars/4-26' } } stage('En ITN grammars') { steps { - sh 'CUDA_VISIBLE_DEVICES="" python 
-          sh 'CUDA_VISIBLE_DEVICES="" python nemo_text_processing/inverse_text_normalization/inverse_normalize.py --language en "twenty" --cache_dir /home/TestData/nlp/text_norm/ci/grammars/4-26'
+          sh 'CUDA_VISIBLE_DEVICES="" python nemo_text_processing/inverse_text_normalization/inverse_normalize.py --language en --text="twenty" --cache_dir /home/TestData/nlp/text_norm/ci/grammars/4-26'
        }
      }
      stage('Test En non-deterministic TN & Run all En TN/ITN tests (restore grammars from cache)') {
@@ -153,7 +153,7 @@ stage('L2: Eng TN') {
       steps {
         sh 'cd tools/text_processing_deployment && python pynini_export.py --output=/home/TestData/nlp/text_norm/output/ --grammars=tn_grammars --cache_dir /home/TestData/nlp/text_norm/ci/grammars/4-26 --language=en && ls -R /home/TestData/nlp/text_norm/output/ && echo ".far files created "|| exit 1'
-        sh 'cd nemo_text_processing/text_normalization/ && python run_predict.py --input=/home/TestData/nlp/text_norm/ci/test.txt --input_case="lower_cased" --language=en --output=/home/TestData/nlp/text_norm/output/test.pynini.txt --verbose'
+        sh 'cd nemo_text_processing/text_normalization/ && python normalize.py --input_file=/home/TestData/nlp/text_norm/ci/test.txt --input_case="lower_cased" --language=en --output_file=/home/TestData/nlp/text_norm/output/test.pynini.txt --verbose'
         sh 'cat /home/TestData/nlp/text_norm/output/test.pynini.txt'
         sh 'cmp --silent /home/TestData/nlp/text_norm/output/test.pynini.txt /home/TestData/nlp/text_norm/ci/test_goal_py_04-14.txt || exit 1'
         sh 'rm -rf /home/TestData/nlp/text_norm/output/*'
@@ -163,7 +163,7 @@ stage('L2: Eng ITN export') {
       steps {
         sh 'cd tools/text_processing_deployment && python pynini_export.py --output=/home/TestData/nlp/text_denorm/output/ --grammars=itn_grammars --cache_dir /home/TestData/nlp/text_norm/ci/grammars/4-26 --language=en && ls -R /home/TestData/nlp/text_denorm/output/ && echo ".far files created "|| exit 1'
-        sh 'cd nemo_text_processing/inverse_text_normalization/ && python run_predict.py --input=/home/TestData/nlp/text_denorm/ci/test.txt --language=en --output=/home/TestData/nlp/text_denorm/output/test.pynini.txt --verbose'
+        sh 'cd nemo_text_processing/inverse_text_normalization/ && python inverse_normalize.py --input_file=/home/TestData/nlp/text_denorm/ci/test.txt --language=en --output_file=/home/TestData/nlp/text_denorm/output/test.pynini.txt --verbose'
         sh 'cmp --silent /home/TestData/nlp/text_denorm/output/test.pynini.txt /home/TestData/nlp/text_denorm/ci/test_goal_py.txt || exit 1'
         sh 'rm -rf /home/TestData/nlp/text_denorm/output/*'
       }

diff --git a/docs/source/nlp/text_normalization/intro.rst b/docs/source/nlp/text_normalization/intro.rst
index 5cb35408d849..e560372f8831 100644
--- a/docs/source/nlp/text_normalization/intro.rst
+++ b/docs/source/nlp/text_normalization/intro.rst
@@ -1,8 +1,6 @@
 (Inverse) Text Normalization
 ============================
 
-NeMo supports Text Normalization (TN) and Inverse Text Normalization (ITN) tasks via rule-based `nemo_text_processing` python package and Neural-based TN/ITN model.
-
 Rule-based (WFST) TN/ITN:
 
 .. toctree::
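Taken together, the Jenkinsfile hunks above document the new command-line surface of nemo_text_processing after this patch: a single input string is now passed with an explicit --text flag instead of a positional argument, and the deleted run_predict.py scripts are folded into normalize.py and inverse_normalize.py via --input_file/--output_file. A minimal usage sketch assembled from the CI commands in the diff; the /home/TestData paths are CI-internal fixtures, and in.txt/out.txt are placeholder paths, not part of the package:

    # String-based TN/ITN: pass the input via --text; --cache_dir reuses precompiled .far grammars
    python nemo_text_processing/text_normalization/normalize.py --text="1" --cache_dir /home/TestData/nlp/text_norm/ci/grammars/4-26
    python nemo_text_processing/inverse_text_normalization/inverse_normalize.py --language en --text="twenty" --cache_dir /home/TestData/nlp/text_norm/ci/grammars/4-26

    # File-based processing: replaces the removed run_predict.py entry points
    python nemo_text_processing/text_normalization/normalize.py --input_file=in.txt --input_case="lower_cased" --language=en --output_file=out.txt --verbose
    python nemo_text_processing/inverse_text_normalization/inverse_normalize.py --input_file=in.txt --language=en --output_file=out.txt --verbose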
diff --git a/docs/source/nlp/text_normalization/wfst/images/deployment_pipeline.png b/docs/source/nlp/text_normalization/wfst/images/deployment_pipeline.png
new file mode 100644
index 0000000000000000000000000000000000000000..9e8aa3750eb7a421cd2b16bf40582e984e400057
GIT binary patch
literal 53260
[... base85-encoded binary image data omitted ...]
z0J7JydOu&2ow^jjAt2Z1-#+H(M1i zqtk|Zb%)a-l3-lh~Na>Lz$!; zPR%yH;S&b5Q{IyS3Xn!S#c<#uzI(z6DiB~=L!yY2jOK$*JUunT(>)YY<1}ROb`Cu5 z2B-Y)I88yWfY<2?BBTFZ==Tk?@2tbvP;V zKatwe(Q%sc`@``8VCy2|X%S#9Kx)su z&I8;j8^)kibPF&k5KEbV-;xRrX)?aAy5?pi2;k{n*xvto{lSnoP(+D29zCINJ_C~b z^j|%h0K6b&a@DQc4d{|s^(5N3JOX|GPO2x9C9#WqHyU*h7(M}ApYZ=mfx{MFUsJ%A zr@(OK%9Tf%XGvZHOJ;L{* zDFB>0v#v%wG0DWyFx0?+ldLfl==In6d8<(?WwZ#R_t4@iPn;jq(KAx9T9HfI%I&u` z6-0GP(E^uSPYx;KUkYN%{{jNEu~!dCAX)+B>;gTh z{s+oXZGaB`2YG{of=-baC~|zaj~w0OX%U=}F$beg??Rcrb?k#~4=| zf5-*{&H-p~+kF=+Pr~x|Pm)xHgoS~Lzv;f3P74n$A|U*j5;=$^}q5uJ)N0UdR|}#IthJG zx4-@?fPsJ1Favb(Ay}ROW-KQ4ro1Z_Ky}brnlBCM*|7J4cPq+MkIy|oc+MoZS01D|7a#C>7mH()EOn_c* z-@KS2FaGG!lmeyGx0DNyu?*2I8&`i(&ZR#}X-78U2{S3|9~*Ca{rcluYgnV~L8yWcKCSDW!FfwE z$+dmcR`yq@S7roXGJwSgPg#(SH+o zUsOj=&m8e^_BQZ`m*j##qxxWX1L|_srbR2;qu`0AfbuUR075nP{&W6B-URj3f%Ct9 z{gRUh1hEk&XY^7ws+R=@nf)xWl`skD>y>8yk~0&~Gx^>0WhzctnPz)$0uD`j;JY@f zh)9eHab5(_<2!Yx4$}D3r%$w}f|CXz9kZYFP3n`S1Frv5h|dde=SrZyd9$3~2bk49 ztcVF>rvB8jdGtKZkK`QhayQ@O%Kn?_QJSe5fDR$_K!<(#0Q>dIq=|x=uYW(kjEqB7 zM+7S!!*dy#@vm2+9z1wpKc2d3Cp2io6UueV1gl)wFDldA@m+TMYvEOV;f^Dt!~5DiIK8? zH6f~+^#)l#&9GVNc6u^~Ci_Y;?19 zpWA_VjJa~gdge-*8>>8YUI^%}Ww?~wm1!Qw#jtcu>)V#Y1NP7xx2(FYHwU-l=vGuF z%4DdIo`fD6cU2*D_4barewb~TI~IxT)|P~JH?fIUS_=lZc@!T;=`~-g3yZqQ%Q&af z7)~j$%ggeMKD&LU$>01kae_9P_$VYIi#(?xqP?R_mw-RBAes50d(!jMLl5~UPVcyH zAJqlB8_CR8lV<$%|00SE@GeJpx*7%hZk?%T2yP_wJtgc zbH1Dwo4!E(UH9E6zpuIO(X#RjWhx88=6z}(-@=!=?NugyX4qZr@V($~oX8ohA7fHt z{WY(*><|;9Zq2Y>%XRZ$z0KXn;nB_03y&SjLOz0V{`7wmp4Mtx*PcgTRNZ7{2;FaHv0ZY{di_gKR?=>X^hk~==aRA1wAZ~ujA zT!CJYtcuO~)v(WeYjO-`({(JwIqH3<#{B@DM}+P-jM6cv#=NVkL_-Q9weba#ie zbcX`c-Q5z>A>9blO82I_k*+&8c+UOb-}~iVpOn4#T62y$YL5AgXXP=ip4Pt>wtjZ2 zPY$1&p>ItR*mEL@&whc%%7l40O;P}93Rb%b{zN4Lyl_Ck>?;f^;5Ks9jCLine?6LS zBlb<$vfBOB!Y}ZY;&_{_>cF5s3ZozKOAst|;#FUPq{ki&GelGJGqplq)3O-$77v59 z^@~B!U76#)m*TiL;q{96gkXX;=u^$(FrFw!~C&Ivbi)X1)et>X$KVD^od z{`|mcJ@^tja8B#h$5Cx1j9O%UK>nWAhV4ZuAv_KDb6LS3b*>`Rj(8oz z5biwheJ>}1e({xpZz8;GaqPrxyeVp7tC8z4_S>9oMThOjdtAAE=ili>_O$sY3r#zCr+K9+vkx&cPHvt4V)HNvZ)E%$ZN8o zay#X5(_V)Q2gm5zkA;<}@pY-k3EGRoQrhCiO?^@zjuE0rrkdG5F@wu`6~AG~$gp7er^ zeQRA=V<&NX@uK(9o4j)#Z}xelasDR8h-T;0!aHhlwBGFUK9Tci$>iO0gS3?Q?L;Q!E@$fF1W7{*uyooc{C!{Pb+!0> zdF$h(T4ZYVEoc7OXK+o(Kt+(Q!UhMjAYMjdt{o5Y=T#I+{E)Co6j7|#1;@Bsm$W1# zRtB@-o})$U+o&7YQ93q_j`a=-`072Q6vqiijxUpSlDu0AKF__!EbGr*i^NzV;D`5i z5=*dal2mw3++>#Lkj|rarxzBzl3J8XNQj?B&PifbqQ5Tj@=RGE?};&=^GP^(H1y*dKBWdC|q7spDKGhJ+@nay;1p2cy?HH zdjj}GbCW8? 
zljxL8J*nD^fGQES9`Qkcsu~g0@q%XF_5gwnxB@>zkF>XhNGwZ*Lq;g(r`GQ?e?3AJ zd)0O)qELefYKaUx(=NCi!)tyYIN4BZK=1~s4xWmlqOb{3x-qq~Yj{e#M_XXgHC1q| z4BwE4msilI6VHnlO-6Bn|3(ITQqF@TJM)W)zLI(smVOOx=Rrob~zwqiB3F}!arWwP0A689L-iiX{5|qQYG|5rsHLyoKOBatTIvVB7;dGlU{7u ztR(oubjy9D@D)9zt}E9`Ow4D(RLX)44;0O^Yin;sgxD3p`9e7V4fdRyeGme&iRBl=VtQ72tKoI1*4wScW|LnH8U1Ll z7Ur!>9vI>kUaFPqM{K-|`q5oEd8dTpdhW8k%OO4;7fYjlj_jBEjLB<2;;SZ~E9FP> zz|wtkJrR<=z4V}q+;@^VxS#7df@7SpDs_9To~N}Av#&Fk6*x(F!Up0k46HS|6s5Aq z5;DI$utRV6PV6VxBp1pDo-+5V+Wb6{boVLSm=&jv=1c-Y5c?p>9z~!bAoJS9OJ}nC z@Z6KUwLDRcw=2r|-Azft(J_|H_<@6ZG)Qa0JZ@OEuCEd6JSozVXZiWtBFuvi<=F@r zxIVM!P7UY!{@FBursY*4xd=;3OCUr-Go5P@y4OZS{xr8gu-ktc?)D)#f0%!a*>p=x zndkNox5?K^F&BnNHG`#quW6j*e;l>iTv3@xpbqYQZQfIOVBFsx+aF#^ zE6?VR(I3wzKWoz30b+*!!*&5zZu2uJq!dz7DHef!P~EfrTQujbPC^N1JVa(Pzh7*gsvvx#~$w>k{%@7+?_O)E9;r)B9bD$B*|uNFa_B2ESiB0*WbMy)MZT6 z*{uT~M@urC|#xDIVv9%?wd(n_UR*?zDy6z^a|j z-0bKu#Vu0gMf`%5Hr^%QOkZ_&gI%X*+bYmXshD{ZcjNh@!K6`WK3M_B*_n#jizgyN z8UsD8pVV^3c%7?ueihw3qBfvP-U;GS1Knqh1V~^NsaH#ED}Xd?=6oeuCIoCnZS6QJ z#8MdJYCmC!Rted!x=o_OCg z4ikftu`?acO}B+;b8&ZjL_JRmaTmMS)VEITCf2Dv(<=is0VmHyi39of-i8yEt}Jm# z-SwD@PksMC|AFH5C;A}po*_xi1`|eT8wiA-?8e>kHfVQPvm?h*TR)mR;C6GvrdrFnggEm*b%Zp?>;-@>Y0i)Zf!{l zB@+JH-Ax_M&8=qKc+KH$U6JL=s>WyFTYv(JAw=hwV|UsSnOC? z2bD>XJ>OqBKb!$Rjl+H;;x}Ush5{%q0Hh)-*JtpE%8R*RPt7Q3+DzoMz|azFF~U(E62bl5`o^Yi<+ ztkihWj_{C`EdvDH{n$Ya{e67!2=gBT0!yfSjWlNV=t8_Nf}lp?ozASMkuos+`nPHYJpp?dKz314 zKeQM%+`F>2cC-z-?g9%r+zzm$pTJ7|TTqeJ)RZ(eEr8ZV9xn9|R`cuCQ~=)5_n8t6cboC9dHp-!WzFZg#$%3SBX2K7zJJ9~ zu&?($vTg0~mHGI_01GMe`h<3;pE4`I^Y;d`Iyq14NF{8^x!OsfraZfGluem;JDSnp zb^f||LImEvZTLObjaNwhhTkD+ne*w&PVdSwJ99pp zKhy4Pp!715+o-`yV-gOTlY$D-#8doIcp^9`{{w-IilMbEPv<8)x>CXt_d(Dty1LeeQ zQg?Q<7n7HrB2g6*Z%PYq68+(czR_!4y<=Q0e^W#_!P61j8(y(lXE=;7Xt!pMGsfzz zB3cwFl)%=PoMS|Z&!_y>)Wtw%GHt_zNKd_gmETS_DXnKv#`b3qWv6F-)DhLe&M09z zou{g+#`EV06nby};e%o2Y;=H#?wJ2!-7Z5~E}zhcx9GikMIw)aL6sY7rwb`d!}w-P zc~bUI(}6ZQO3i}H55C>Q&9z|Gevv=tdLdHtKacopIb5Yeeixwl>}~vPE;fqEGm3WM zOWF7mCsj}$eW{1rGgpil8kmu3GxDy06W#du%gOT#SK7v^p;^fit5!ZWK$$xX@? zty`a2-o_JG{)lh zM914D1vbY~oUJZg zmg4VH%GuV}OiSMhNIlv%xd~lCTfpHA(UIN{omgGZw~;FAbxzAE z>b_O=G7hF-`>2htuYLQ@V#c*1YWJyOrJeaRP8}D!tMgaxbT^fwkp~6vhy)Ao{iVtK zwaE{>j|gT|569&Sd1(+mwzR*THec%_OGMQiikL-1lx!4;iHSu-Bb22<#k>*V;N4T9 zwPe$0V{lGUwOsMTxM*!7dq&eA`y9d#17Ean#a;O5lU=2^B83FcLdcGueU;R3y%Jok@fwu5japXH4SPY6 z^&S?^q4K3}@$v87-+?9LmpVhNE*HZ})T+p($J^Psbkacfa5?4>bBStuvh6+ktqIW(J!X2HXi#WtW_w0bQ(O=rJ zhI?EuzH>Z#sb{AADSH=^ktB_O$B^1b#Ea$8=;u^3sdpeo=)(4*Z*{2KRSiDu zg^BEjD9@F0s-4R@Di=x00oPeet`_~gTN1;SM%ih-ZG(e1Mmq1rPkPHW^+T~>J#Mi5!#&DI3f24Ou@DJAtT{rNoq3|zcNGJmQ5;z#oyUk_R^N` zJt-?^d`I4A=Z`01Z+X415&2Y;d&*0LIqm90hHVf2Y>Eju^Xunzp&EQ6z)>+U*}is~ zicy01Hu*rq{PiNThSWcWW}S-}IXp}i?hmQnrEHduc^U8;&7|@4D44RkrJbWcB+e5u zXPGHC#?qY)gv58%IoS5pc>WnllVlp+MNfN^+1Le}zVIrG;xz+XSNsvEc<=or zX5M|{(%t*f*@0{&j5s8n;GOQ==7^NDWzUcW*`{u!UHZ>Ro*Q}ev^vh6*%ZOcwB_^b z6|DYSInjJ7V!JuysOWIp9bueR!a)az`qS?(xjk4I+d{90)-VKwL+H8LV`S6fgf6F^ z>u+Ih3>rsYhF84z;z>&pyv;cWh+eGRHQ9wR{~jkz^LiQ5bM@zqJBgzHCG!HW#|kJd zB#`v=C2!WxHjV1m((#6TWy-#F2XP;cbkyw6{?2*<6AJbZ$^$Dz(&9$k?lU#1S;7lt zvX3E}m_o3vzcWv@8@iX73zOb!o5q zv$kSEG++;w-f(umm^>R7<*2{?jFX+>M4wsd^#j46S@%bZ;n6o+wL3}T@dYc@>*7DV z^MT87;rc_pdfUg2wr|_CVdA{4R38`lI!|%Bm-(g?&!{dh_MwGOmbgQb4yKo|hL*0l z`IZ9L?eh5?e5Ho+BGZ&V$;epuzBk{15r8kCZysoHYYa?d4HMnp`BL}wbGr|Yg?g;? 
zd;vK+6(cHF5m8)tOGf_no6Q2F&XDGce^g^qdKQyjiOiw&>=Wf)0_z&_e~GWOPI*Gd z{Wbr$Z-eSg!QcSdNn>bcq{=+Uv(7&q^?L|AO7C{uZ>4s!mjYVW+1lOe7>YG=;OCu} zk&0zZy}xS}alK0Z{4W;3KH}mMukoaZQaY6;uRc=L1sVH?^#qyhejz?qg?Qt>)NSeW z4b;Bn{cwuwB%NBk$hHn&6BD9(IKYIpr2H%#uk%$h*JhyRDCk<&!MKB~xxD$di%#uh zO*;HNd6^UWg`=TCI>Qt5RIS1hrb&}L$Kr#R!!<-k?uMyXYtyEXuwNV`Tavj}RCY4k zH6!i3jfW?!ZSZiMM0>iB1smastIR3F;NlzB+u0|C1m({f9P|8n z6m_GSORD^5rBx4k;TGfYi6i4D-q2VXEk3_VUk6omgJoE{n@%pCu*reMB7SGzQC=fq z9c2M#r=lBkGlF({NQhHdZ;?`3yKMAG2ep&TcOBX0G1m zFL$J)^gA;Se#ra1x%PjI25ilCc4oA_O%Fc2+yj{uh`h(IbFBH>&+~s-ThquGMkg){ zp7YGGa{KG_C62P3x~i5^DY_yD7%cf_Hy$>pZWS`w^3973cK4k7o)X(mT6Ts!e=R#w z^=?*ThE_T0c{;N{*1AB?C3|8GS+ZGp$@M~y_;to0?zK|vm|{A2cc4tok)U9QNn@EC zzZ>1H-PM4g?EM-~H=8C>lCe+-1M9o}V@M}^2#Qq~~p#dvXPI4R0shD$OH z&XicJsYbnbbwwf`xnyk|Ty>aq%8?tT4?3Wx%EfFqD6;6K%9u{;(FjLBTm{HX5r1vQ zXi?#u++o~Ux{bMpvlFK%%>DL@plOJUfmJ?i)q6?RcFRGM*WRq31MS)mTU2!$?O4JG zkn`>Oe;|gv-$vK|0lS5a$cPg5Dhp3mq06t{-$gYb?9jboBQVSs^H=YC zsx1o^RA&>Sbyv1kqnNm{shutJZF_`FSa(J@j#AXi=--_vA7mW#7){Jly2l4lxh$E^ zgxOBtjn%trW%?xMq~AI!oU}>GhgVbBD!y5xy9p>s9oM=Hr|)@l;nS}LJ#{(XV_NEM=RJ*4u5%WT6geL#^$N|)BU}&MX2}|UD#Lq%ejyqL4&JO3 zg@V=DxzWb+>wh$ue~_m`ORMg<#6Q7bH+8(lC@rzM&63;hN?=Hq639JHsBdg!bb;a6 zf_Tz~&b4U3WX^dXd!9QB$eF#^FR5g(ac`~J-bk2fV98?qIBEH$xy5IB^{L?wmEMN4 zPHWrZu8zD}Q|ROyG+h#RVSbwQTsiyO=WYsgVNYm@nX6%rGi-d>zt(!N`6wAMZB6Nt zIJ*kLB|gW;7@C!{nH;H@YT%9jg^3 zR^z1(;I}s+3XvC`AyAugPUE%CbQ5wac?Tzc@t9;?hRk-INc_5}(b0Ex_c}u@ZfRF? zCuU}#F3X`xcDQbz zvTy1CXe%$Jr%F#$i(6cB{j< z^J&zYLJuYSoJ;0E8Y5AJMncCrco>36k!6x(mvnwgPl?O>B$zioB<0jsW@Dt*{OSvn zjSW7r$}9{Q$MyIXe(he{Zn?-i1~#hCvM)7J=rMu&`KOH0e1lfl^<;T& z2pNFg=^FTXJ5W4_t6fTn>=AfpIsy?sZmr-H{{CYfSwAgZJ{-TH_sBwmU&6tYZS$i1 z$+UOUV@GTHc6|1j=Q`f6p8e1e)7B7nP$s`R^5=-xEKEI}b5d@xjwi1FvZaORa$ORt zVtsJ%x;fLQ(LE3l=0rwI{1opo)H#aJ$-|P<_Opvw-9q^}L@+Sh-JzA{N++L=gva*r zY~#1tS5i7?r41mRjE`iz3QHixniDYmJd|1WUqgmRVp%3pQ4a9&hi2Oj71Pf@vl~-H zD?I%a4V=y9BdMz>{UO7F#?YL2^#B)V{OB*wbQxh4qb~bj%u8AR_|cnuIL!myR|>k% z&$NxpgnhiWf3&&%LQJOy?fs}OIW4}(hv*55s((29w=`AWQ2U3V2?b0>3mQwoEQ@M7 zCa<9iSTx42)a(r6_019s6ZI39W`~};*0YUo^EZ}H7R9HP=@^C`t+zS^Evp=Ci5(p1 z^*56SoXEHf*oOIJrE=n@X*q?;qE5_uUhRTytU8slwvVWZ>zDDGMMTP(@Fg7fKr#Le z278#uIM^LJ+mGjW+^vD>EWh0I3M3((r*5t;-=|1c{R(lVQ_U@|NNr5~*6*aaN5+md zrX|XUDaccrzlBGsSy6cSrVW}J| zE9n}2($(b6BWsKZ9iw%C*DQ3x&<(;J$ly}2E;?NFZ8o9E>m8imO>|zv)C~*F-A7_` zuMSD?%r3#7qzovJek<-l7kw0L?pr8JIgqhmTr=F3`dawA;UTvYtQ4B_N^v7_B3TxI57LjU!CLb7bp=k>(beM9Mp0Q35n|Z2H_;qcu4sLf+R9$fF&w}OWM6;pnn7gm? 
za-#YP>S9pBKh}xo1c=!wFcX+IxcV{YpOR;NkU{oOW@;aJZG^rS4?|wmL~Es#b1ki% zEb3Mixfqgk2NTXq_~{31FA6A6w)@$g>~v=^pe?-enp2LlHBa8qtcF+$PlTmadDX|t zRB*j?Ss$QgFrm9cv)-*e{i!gt*F0G5%fs^vcWhWw8Vf^C4fFsa)=6qum)ZHH++5#O zKE}oEa9SGi!QY)tuO-GpfF*C=#}#lw$ymd4^g422_WgJ;)&=XX^mLZV8?{487;|&F zn!4#REZ*$iEs`VGOU|P3T;)F(MTyvN5Z6C(;`V1^peb4KRZPPy`+QuPEfd z5GR?>4Sky0=miH>^^X&kdDb;=k{G zQ*%nnRoMJ!jUW(TTceexAg3`oG~f}t-K%<^<+@YzJvnVzVN;}EeUOVgtw`gDloSj5 zCk6gNvC^+EAhP4K>3KnO~eCa+~Li5Nt|m;g4-hmsE+@DW+OilxCZW zZ|)(Qzj(%VX8gx1+$17Bcpc4#6acdqLBIUOZ1-Gn>>Bqa>K4K%dgNs{fuZXYR6H+- z8X>z`OJAo)X1zaEQ-#UdNcL;RnQ=g|lw}>(F|Fy;Dz(AMb{@ga7jN0f^5*0@>01uPW*N{v~lLflN{x&Nt|?T+MxV9TyGf>j|63$ zHP%oGm)G1EKH8Z97t67{7h;$F!uE!pu?V3mW72MH8V61`r>R4!SQ8kwB?Rcxt>|WU zk{pq9OyayqaR~>{&ddps2X}hNOx}7Qf2}F8<>qF`&jj%;9KO4dE|kQoR7wiA^x{j) z-Hy$z_7M3{?Xz*Yn|04q!ehMyQir5qmp|lt{xxN*^pI9|xQmt;Ip4c7R2e7S-i6mA<3k34JGnC^k-^A0d1!YXMA~*lHEGZ0dqnIN-G-y7Dd`J3O`L^qM(PGU0_sNM zJ&59Ewd4ain~4@vt@KNh-iCRtdbc5YXmh@y;*eq0Ems?p6ctuSpEIq~?f8Op?zmTE z%w^TiWQHHE+HlxrPPyZ@DHXOKWVmr4`}WEq`&&4iqLRGp5=*z8%tz;mc>I#bJkukY z3JQ0Z2^-vC4_s`DzB|p^tRh@S!iR!ZXdSUSqM}CQ|EK;$B+4+E* zV*9zw%)D%-?t;X=jTi^T5^7zv-SM;rL{NKa#x7BqR$j%&C zC3uWQqYL;$aFrxy6*XHx~>E~}5=u1CGWpX7$qffqF z#K9N}@h7l2hgck)8kQYXBQ%j*MIQ8HKR#<5LiqNgtmTe9Z4#r+r_W!u*%gm_#B5@< zA}{*<<0E1pgG3(Q0E2x?!x?1m?yxa!yD=T-)e#tdlW_7ci1S*pg7YfXBU8tBZAlkT zg(X33Lep@$i+=o~5N3yr_X-Ocf;%|b{pA-cZK4xoXw|vIF7kR%q1|la-(2Snn!MZob#oHamdzUhGM9I-#u++(Dn;odr|n6#?;j`criL ze%+B0bcvxjIc>drh0-Q5SIaFopXJDx8}N3qS@h2#LVTy+Os!->8Yg5hp^vg8U25M?`pzD zt&3{Fq~^_Thd=8f8)HvN_-XFmgdD6JK^rQSXMtGg)RW0&h7X}DP#hWk5sQ!RQ6Cv^ z$Be^5k^+u~<4WdbwfhgNB>CL|89mj~Dd%33-Z8-IAW;^Cs10i|RBLOu*8l%C4InQ=pEtCcdSZd%uzMr39^Q z7k5NR%Wr3rzHXfV^y2o*cMLjq-cp6K_l_n;@Q#6 zV(28JJ$86;{k$|mp!@l{FrPc@r+QU*BFoo}X*FfP8$V-T4G-bC!RGxF8ArjDUzv(M7JGF7pqknUx?J* zot_$s*O zH=}xj?J)nq+S`vbq#}RF@-vsbA-e65o1{Mo$jDx{-e?LhKO7Br@jhd=JQcikGiAd< zCOe#{xjOFbPb;w&=UwXXqx; zQS!T*{zvLh!!@4$!`^w}UB|T!LwQo-;Nmg2k2BOMXx!SSV(r!XrRb}42f7VE$O4Ej zDuX{zrB0+&eTJqj`*S3p-O;Co~>x5@+N2E2`c-NYp`@uVB&Y8&tKI1)sg>WcsK;o}_YpNY+Tzxu&$m8PQ@pgHmr6V>Cdn0>DKnp-Lnh!d{M)wC z2B?&JhKAUoC;DQQ&v$1gHlE9iYEa25PiUTA^>&3Q@^qxh*d2y6r?&Byv^e=senLF8 zyS*Jfg*6;k(QV`rlH#3t^ckE)uQwe&DYSDeyF|hYP9w352#O)G?5>CQw1Tb__3Zn` zhv1Nq(aRWGF%3%jH1#{e(&og0!06S%&pHF_78k1|->)2U-E^%*__+g#{?zYyC!txW z)u!F`@w5e!z&TqQ|CVt?`(@2Aa!V1H=8)%2#4WJ&o)Z7|5Mx{nXCbmOs!XLTJl zuNOzqQx!8?1R=-^3_MNThN>QwBhx3gRm%-S^HE#9Lo^p;aC60x??HH z*D(Il+OLic)tY9eEBbxw7c1}09XwSg>*{BfhME#_+4EtbdjqpHs#;qGES@K z0L7^Xy{31350xn>QpGnYW?0>Re@FamFb$k-V74B=XZe&@u?0V2PuKr3PJJM|ASW>^SZ74jXF&OB-WXl4f*Gt*JNqGxET zD^lX)*JWL&hJ^NTLRdWz=!YO8^-WWB6vIn%Hr)gwq92&KoL#U1UMOekx}m-uU_XZ2 z8#Bp6ZGC_=1=Z9N0eA=i$y+bB01vDe0iep`z)l*G*Cl-~} zGT;7P(Xx0&+v^cTruWTrOQzL~r&B#4M+0lSE<|O_It1G}mI?gUY0!obvFJHM9Mk_= zrfLg@2{crzsj!BK!i=ZGX`FEheqz9qhy|4E!Xr_5`c@}OZ@upf2f!60V*6`>c8_g~ z(+URr`8Qd-wigRiv^nXPU;CiV4YjDCKo0H3zQ+M}8QOb~i)j_*{NPURxl~F-PqdW0 zO+>2v?V@RftUFp+i9ObzSKS%D^84I;X92G_KmAGhezhc@suAhR;Yar*P9L%NN73oG zF#y&3K@sR6%QS#3vW!RymsCKN`SQ;_ehdoYghwOK->#|pa6$C7Y@g+P7FE!YJ$s}| zI*v#LbrB(#TYfUp$Q~Df#UkTY0bku>AVoSb5-4B^3bI`6@TcT(l>8xKnpkK%m8$uq zDsO)wgj;n(DbsB)%%y5NR(_vB!82RunW?H zk--uIDu;Y!k9dvUwJ?0H5~1o_OT<*Rp!1)n@%osSbM0>1;kGm_zDEGRP-$|w;XCaz zoyV+t{EKbayi%27wy!gSgh@U;>R5@`WN=(SROhm*+h*v`$0DqWHJZ|a1WTvpFgnzJ zZSJIlOIHp17fauNp6<-Z32&Wd1ExK*)RFav2DWBW`lf59mWu)F4MS3zr-&zqR{*V__x$dm*@>Dxlv%L(Ki#!mxQOerLW3(L#?O z>bYjkp_SWXp@v2Hm_5$7phaumEmC>w2bMNKNdxr_sD0GXRF{+?p^=LkOJdcPW{_9) zjNxjyI@1)QoqA9}U}j2yI#gvE&stc=AC_=7Z}21esbO^`v`LxSASToMunPL> zkOU$Ji&=z|58-vu$2Dt<*)B(@GZ>slDtCS=^YrbYMAW$X$XC0D!KJ!HStkQ#5C8)F 
zf2B?E3g;+ojpT)o=K)+L`Q~gL^AOKX6%-~1Ad&ztnoN0lS!MSmQ-;D_>&@V&z*cx6 z9K)voG39o(D%0tX!;*z!oWVsM?a{r3?+OkH&IHa6JVUMdMv93d^#*^ZzV`c3@}$H+ zaDm%WmDjs+ol|9RT8*c{(Kf8Hi1KS$Mq*ZAm4FxEZQ*pQJb(7iHETzo_qn*W4B@6c)RTM3_1 zdtx7g$J#4>f6nCTNhiZ71qT};oK5bqB|DQRi*zbt8rbwh@Avgw>N+NApx8WG$u0g{ zJ>Wh~^#J9{IiRGxZ%-Wut|XYgS*$;T80=kXhjZlu3E5AvW=HybDP*)zF=p!ax_=_B=#E7=3?CLjg#Bdm(03tf{xD+qp9&=m!s<-{FG;=8G-7Xadj zU|)ZLc=Oqsnd^&KB|wz;hI(BB#H)V9=f<~}cjw69E4*=y2af@&b}>%F3g92BFv+l| z{>(j&Cw1z@xXm6Yynk#c_qKtF0BkBdgXgs~0IUPB46SO76_UdgnvIlzHzP_uEhfSc z--(7$2`#N?=UKA}kWC%@p8<`|;;KBM-Ky=aLbLVh5IU(lM_KzV9+5M}EwFsZ> zRDKElj(L^=(lRI$CD7p?NBg_$JC0heX?98zGYy`r4a1X8k2hZSmZ3JE>>KV0^)u+S z3J%60O2EyD2*WmfS6($4QqYEVw(n@E`E4p}l~z%~^naDL)aw}UzJYqA+(49;k_?LA z#RA+>A65dZRQdD-f#nsFypYlKx;s*MhtDr3Vk-W6)g<`mRDCFa@z0!5i`SmFZ?YO# z>~jKtHMGP3f#Gondw}ptfi01sW;)xF5gdF0F^>+32XDUuUhCA{17#?I!hF}5>U9Ab zCKNY&5+k7gb%9U#+5UpF75s&ok#57O{SJbf&cU97IC^SvPUEwFEgIiWB!eywpTHf5 z)bQsTWIpYu9~*Ose4dTA>>q}+AKhMNqboj|yjUq{i9}=e?Wq(x?795RYHb`5zigqV zP~|{MpZ?tB+Mve9c#O?)J*CUDzmE2+>GPTd(bB@U%ZPMhcRJxF^?kvFMye4 zwZ>jl33*OSN}u<{yETDtaH%vPISFJxP|!4RjY#f*2=p&t5Y=f}@3nDCXwGWg%5Lx1 zKcaFcC5)Sf?Ogi!;nKF%Ps8BtC{P~5iT*PyKumuiy6pT%6bYY@%@>CrslaRITD+6R zS&P~LYX)0m|2u%PrHQTVTz>z-L`wC&tx|i@o()G0dBf0PLu_n)bP#lY=au z&14Y&zO^Iy_oy{0`wFxB8~9A}!V%_4HE#8CPh@>DUp$*|`oAd>~lJj_5( zoxRqLHr*qARLG0(R9-Mx&G{3OBDk3G=2_j8cgGfhn6|WqaZ;t3Mi*JtoPHJ4Dy7k} zf~JV3M&N{Ys@~xn@a{Q&F6Wv4_esKEdav47^mC$hv2}>Ge7<2wtU(k-e@Nsu|>&A2G0}4nDMWme*o3AwL6*ZKVUhX zYLhGL#l;0k>+9(3RB2kSZ#YPH#A&?r;Sx>28?RYcie&V+yK#2h%}M*PZeYnn$mYj2 zO+!`cP2i~4U(2yL-3E!pqsKJCmp$L-Is2w3n~sMkPsNW&aT}r&iPm zEN;jc4;uR}+hD$&NlQq_K02=NQ4iiK{&}ui-5f!+fqfEfPYdJ2w#<&_w5=@Bhv3IA zh*f_H|I~Xc9o?^YbL-7oBu-A8_(a+G$Wu#&?q||||E&3AwX^z`yyN{kxp+xD=REsY zb48kUQ3HU`ODHZi)zS$tkgy14zQjggvQu(g#g7$_GB*8<<}-US-EQ|Af6n1|3M#UN zeWE=;lJ)Gp0t_Sa0`K1W5xO1-K7IVCLFf@o4(eA=ip8SJ!3u8RQbqdboFD@wQ06l1 z@7(*fLTYN8Ec?iLcz(TPvw+|Q@I6;@1L8K3&QCS-&|e4y6%-eb5jf48I$^_zBvuab zX<%TORz*IN=6(?i?s9*l^)$Dz0ec1-bgJl&+r>2%+w3r}EU&lM`K`hDVPM2yy)Gel z)!a{VIH11?u~E^qdD;M2OTO$}ZkN_TpOU0E0VsaDYMb27sOb6A^#P&bFTk+3(*IKz zd|(4u1Lufe{b0;uf`^jj&H;)t2K_id=M4RqyriXN@po&8)%^&)?#63ajC#=*9O2+rQ3OZ zsnVkF0l3XzV7_O|Lg_GJu%iIy4ewD0(KM7KY(O?&@~xxe_lNNix^ECyS65GgPr#sN z+n*xrAU~oEdu#NP&}9Xc4N#WlgsSLxjO_uM9LlfXzboIZYd|W1w6psGm+#aGUdP)^ zC@&EVgCQ*~ts@|-jG3x1LP*}!RehsKz6e@8jDRczrW4qmN}x}Kd4+WCILFl>BoG80 zT`TsPAHDl0Ehu9gAes43b-=_+a&!IeRZ3n?E>lL^qk2vc24)x$thxK1U3%q%VifCK zM*Szw`O9_wUBCzVrc(dHmTjc149v9sb-ZT#z3RYpRRN0PNx<@RFM{^*VvVVPIq#OX z&t^MHAlR5MV#S>S!}2baAqVxi5)}WT6Nia-_BJyDx|ni+b4@pd`pp~Y3b#hY#K&*@ z1IimmKnofTcRrbZ%7P} z=)4sUEEyQ2=>mlekKW&>{F%XIqZlzO(@wT_U$>WDwn>VqkwY7?yo7`i}GCeYpKY4sbxU|t3O0KL5B3@(B&b}mEppdUh6wts&Z-2+@r*r*VX1z_#q zI|8_X*BqGCzorIiztG8HM}abm$ML)ySX7t@W2ihBx_Ixdk1YWC7IqXMrwJwEp9l8} zVPZBJta|vpxbspV#UxnR2sY2d^gC0i>7XlU0LFi>i}wwHnyi;bv&{R!Y@kwu5&CY_ zhY5zk&G{_B$Ejqay5Sj_^BU(AHnDfgQcFiXNVeZy+Ps0`1+OnUte+jMC*Ra+NCNWRQ)?xOH0eGjSZ->K`(E)W#Be) zdEHJY37*Mg1DgS8VqwuReCRR(KiP6K({lfGnBi7HS_YI5m^@&NGMIfwbp7hV%k}*= zEnsrMn{?IEYSdIn0~aR-T$~~}egD_B&g}gYu>`gCPjG}0L+gJ%F^cB_?7gxj0DWiz zJzqe{bYtp{!?^#~&t+wiJm_Lq)q0xOJk-`qzX0E(T&i6R3x^B_4+KP#egY;B*qJct z)qpAAtKj5!%j5?Krd*=gV+<&^i0zC_pbG>wZYZH245B!QDRKPg8UKk~GKr-J`Uq@B zovX`<2Mz9Uyt1sJWM&33jcA9?A{lc1wYRr-2dJDks+@|7$$tv;pX7~lm+-U~WbMZ4$T$Qr<_5}*bg zVVvR!rQqDDwubPnuC6N-J%K?~2Xjcl!FloIv7`TEU3Ac#c^{EPz(4`h``_ru9!CfM z`ZwwUQgtx));GtyUUyDXiA+EKPE4;ih7zhuK$j)mj@J1fA{f*r*VDbyppF)L(fp5$ zo%ib2p#uYsa9igl=zbAkChzqG+ytrye|`e8WS|GoXwL*s7vJ|F-9D8fSq+V3sN7** z{qQ(tPA~s!o9Dm7Jv*VRRdsmOM}NOuKSsyLw;(MymiM;Vcz3gJGGUE~0M$7l)(Aj& 
z4HVT|&bV*9`2QnbgZbGysF{HQ9Lzv&#~(Bi>LolD5p*70G+?gp{;O9|xoeu6Q$qvj zmz=Duw-fG1(f>KWG5|t*u;W?xK4>I^5o2_4aNu%2fNIeH;{eYO{Q3L;29=@@EQS7Y z^fmwaKk#ada^P>Fm-i1|$oaDeUrw%fvNZ<1Yiq#8CYlU14uNncAR`kFjVqkH$S{ZX z_4OyxbEr^%6Ou9bJ%fw8VFx)|hc7++K0Yp;U7Zl|pMMG?5kiU9G^Y zh~E&p3aFzabJ;XHINc0Q8mzNM0Jp$cpjZEUjiE72j!!G6VcK*3)QEM$FR`( zAx?pkCe*`8n5fCe(K1Sfh=FX0yic0*yi&FxUrhmPi`<6nwJg8yc zt00UwEg;}OsRlI1(71E&V1TDB1$@)cga~@I=9e!m`afhd!105=9E?EW2Nz&-S7G$; z9KZ`mz`DufKRCpmXo^aZr0I$QmN5JM;oN^U37Q0qm+B1NdnHO*+A&{vw0o0et}N{R z*I5GgT3rw-1g4$0uv*IgvtD|y6Y9S~sy5>P2~sY{#`i(0s;Vl#y!`jS?h3p!VifSV zJdAbGBn~GZyfJhv?+BYyR&PqCd7h z#kyBfQV^Z2^r3k!4C2gP-9BGzOw4~$zUb32zR&JBB*1wkgD^-W>%m)E?sKr{^mO|4 zArmKTF+GsJ{K1JzwuAS~IIR4<*L_{#S%(R&zcMeg zTbKU7$s+r9RxmVHG8pRI4#z)u&4;wWa_)QRf7xPhuf)9&Q#{7i9R0n~*MY8=aR(?? z0?$g|zD)~Md~i<|4CDJ>g=Z=R!~Xz=e>w7=e)DGEEqyK%2K(I+!iyDpsA=ag0FSG8 zeSQcH!@y8XK^|TNjX(GKD9`_C@2!KX+P}Z?V_yTfq9RBcAPN#nw+RB0(s4k#JEV;Z zh{TcZ?(R-SI7&($8tDe%(D|&5*U$Ge^E@-ZncqBrK4+Y{_j=FS`@PqD#cRFRd#|-U z5x`^q^9`bHEH{r}KYZyr!GGg(XjuJw8#2*F9}N(2)nr-rKMw%kKk`Tn&>!zVO4`bY zfAR0$?MFNQixG-v7}WiaU}Yi;pw&{C!6GX!ulw76_jfn{$Eo`AE+Zf;;f1|@8E7uw zZ%}cR5x@|8N5~ldVC1Zl;YK7D6VmzmHigx7x(bG26!kTKMD-^-cMr`?ypE~&-fV@8~4T65fB)s zowGlM_vG*8CeJLA@;0D_4Y$a)`d95VRqJK#+VIlAhVXwN4R~x>*eqN3H@4ibalb?zLB0v2vO7h-}0i&k=)Nm>qqiUL6<#?BV&fn z-68+&5gXjeP!0nnX4Ez0zms^FQiS;9>n4@U&dW#nAu91LwlDEw-BUm(Sy@?#)1BplZALMpx#gYjWe?k*HRFfvCycgshvCh{s_-1 z%Za+BpR2`9!cxX z#k>VXp%7G=__k!k{>XlQySS=p{$+%5sQvVskBV`>ffKpfpR_1zB=_x+|Fdi$)6*aO z>Up%jGL41{i1)N2b$``M`856P$>Th^t~C^d9^l_lEXM!ZptdYRT|i(hS}LjOhWxYa z|KR>UkD5yzbwg%J-iHR72;xk5as6*8E31M(PW(NQK=YhX=<7U~(*3IcWQOLxkB} zw}LulZWhcrD#A}^2L`EHnBFZ(d z-XDKk`(~Nbq@S&Y>(B|p45MkRSbQ-IcSw(B6?WWQ>Tb%Ze4wEc(v&nWuznZt>g=ZL z){})Tx}0idXtQ>Cx<^UR0QyWBni`PhE;^3vqSaWm+`-zCo$0tSkj$#d zt7SRwZWs(SF%!-4!N7^aSK|%6D;aQ|;YZO}+N1B%rG)_5!|Aj&F51N z+d&G2cFV)Zjs}lWS1ml~?&&cEmvf@H#gfMc+KDp!{g?i}I{{tm_@$UnxOvJhd)2)- zqS;8R*7oJYUuR&^1ii(Ujh9;@xnJf$ht**xd<$HSR1i(Q+l!U0aIGme9pcd#0Mm5_ zpC}(&BnPe+Ls>J|$Pg40lY`{e&l1{?sgFM?(96iod@*XSSZj{ZFHn=8Xm0EPoS0Ul z*rQXn9;`wmBDuGglBY#`iEeH#BS-$pllLxA>Bt*LydLCM2HkVrd57O5QeLvF@5KZ0 zA}_eDow#bWU^Hd zFRw~QjFU{9)N}tpKfkyLvvLu?=T8|FvOX47tu`{)Op)o$*tH2{xVyU_8!_9+R06On!E>c&aV|gk*6msA6>E?E1PeTsYQjQJyl5;WzvmDqCmBW=7Ra1es z*&n-FBc+gopV_~LpK9$^(u+!3C+C3fk55j-V9yA|o@Xk3s8V9miC}FoQ^%#U^2~B& z$IV5U9vE@jSVd7Wbv~y;qNSx}r^U>5V4Cj$UvnT96%}=&x?vFGs;U>`T+CtI8~Yw? zhe-8xTzYzXp~(Qp{vyq^G*B%Vv~AeN;y}t(NlPji@eose;80XpID@O%7GKzUcj|oL z^!i*)Po}i6F!oK(%8Kju)hMf0?*3eh1VSo4E{%cnBZn@CF)-Kc<=gHpnw=` zdILf{L_1m376bLNv!Ex?xydK6t28+|X#j2D4b9A`f3Y*w>{)I%HLmRn?mdFx9`ZFN z>MoO4K#fGGFv5VMd$?U{lEq`F?2LE3$vz4t^c#G4Zp+9F99hPV*e!J_RrQN@u!*7N zEtbl{1+?6+5mL)bm@d6B!Q=6ms+h!|D}yC9#ZxQp?x(BN)TwzlhG#n`*BveuvtM}9 ziFRaM;zOndVsPlr#Ddn3Mj$zQdNyc>Q{_1EfsdQ}MDe;q71mczJGIlFmC%|B8R18$ zWI3`a5tCh6S^1BNV@-p7P*6HBgGLp$v)&>Buo-A{_AIOJjJa0AAcz}w7pYSEHovg3 zwjr3~gTCeQuF94Tn-USpQ1k&4afG>j@czL1&e9M)Kfd|cz|Ou4@)MtymPXr(GsY1C z0SR!yJ9V?N6||d_Gkh>TVm+8vFOtV39D3`YcBWdkZ*wd(YNHB@wrF@`(gv^)`Mo$J zqos}>QE$(ptq};XvAI#^_wQj^*jkU^A(uuv;3!N~?1SE5c#!IcnV-%c! zHw!l6=+gs_ju3llkP*MIJ^Uo91Kd29Wd6d3Wi7zH2BP$0ezE)DR&zK#{^25Fhhnv?CGn;%k zeG!lx1V1_(Drktg^Ye^g>Q(gX8`)ApsabWSKj;Kj&hGCoyJA$DY+~{H`uaaO;8((e zJJOR=Q@4%DA+|yr_j+G(7}^Q?4EKhkP5Ln5@;wg`eoOFRg{Zr`n;?C$V!4_er=p~$ zmIdnasGb7?BY^%3>5GV8gx=H0d`Ann^tYOhi$%CG2mBdB$E{zlO#1R^)9|mC9Y-=? 
z-lv@u;@j5ah@78CaG1!bs0_qvIzpHN=BxTq5eyphR+%{L5Mzefke_2UBY14mZ%an= zr53~a({i}{I{dF$0K)G-n&7h4EULDjaAPn|6Sr2ko#3w;SN^eJF=^2{!dX}U)O%3I zSjfkJe~g8Hmn?vnSp1h{N1<{le^XBIl|OY8K#R5rr+@VMLwv!1{7EDL$Ni_Cg!=D` zM%rGC(-!-47NFW*PcH@O86e{Siqw*eBAm-29>yO|nsY5&Ab_Mxm((35b!06qE%mZ1 z`s>2HHE-{@O|5J0?bD<junNvEsr(A%! zi6m^-mxa&+;|6_sm)0GwoO25d3Sv*xOnOCXXlmL69PhYqjPpj{>h8X38$8jQfDhn# zfJ7~?s2KNExBO1q*xX!rH@&khIBmrJ)g21L4EVHZK}~0FX-S^#TG5RMknO6oU6a$( z(~B8*e+7t^;gzJRYqOEV@;{>fU>BFqxw)@7z{4-E~`rL?xR$Vp1Jg{J%U z6$|OWuW5*9ttwMegu%`RyHF_ek53b%**Q5kxv2;}t`K^7I1#{M&2w&#oqf+7KbDPD zx@tw9-n@9~_*(&NLVGGuWOrnvnR?jsGX2X$07BB~k-FdX`|$3wMhwJHnK!yDu=<5(^{Y1e0a71z-`gq87WE`F>dB;*cpfA=zdi!{~#&>DPcBJ6u>dxG#T zAB-7tuZ&LIj(pn2p5kW3k7Pl_ta!XR*Wh%Xx3_nq<(jOnSX{^6CstP0ps1*!=@l4? z_>bUqz|wqhn_?as!aB$pq;$vVClSr|8?j{O@58>9m>{Xlvct3=s zNBvM>yVeZ@o`{#mHA(2nq@gh@1tIv0yPTYyqh}7F({{njJAkh5T=PkWVX9vm)<86tDE7{oD@1H|>_P;%!0+pB2+S)3B zfd{F-LhsQyYW{7~5*{h!NQfGLHy7OSrI6a zH}~5jSF3<)a(f&nfQcvwQf0!0hHo(TsiqcYzw8_F5x;Y}1FR_Q!D-Y(;^v!^P{t<~ zn6l;p^vjIFvStn|TkVwe;Ui|kU7#}@FiH1cA_T9NGHexeap{Kng@~l$D=YaXCnji$ zT%6Jis-$3gaav>~1x!B#mz$Q7k|J5Lv0;ngMJVRP!oq5=hZzko^cQ7}V9%dB2Q{b^ zh%K+ydvvw6zw%JSUlF5oAkd&)_ic(wN{%$PHdH&CJVw9xB@MAONE+g)=83Zn*9Ned z@Xi|e1xzHS!C(_Wr&H8`eLh&hxJp5(0R)Wh$EmyWkB)=o0)}|`>Qx+^zYtC=>4`j0 zlah*k_wL<4Q5f(tIaM3s!!ElfLh|s48ZSJb-t33dAj9_4Q39A}3DS_^OM;rkCKvu<8~adB`2)DMYS>^9+0T?o7a5SR-R_Q%c3NlW{| zlHK{kJG3PZTie=jFyt7v4{;~I*9*k9;o-q=H}+}cLYdI20MG#OWiMX_fWyeo&mZgGSX)C7DP;5D7eUL(0tC6h z7Q8@tz-a=$4Krs;ur{XA0Xaf*LUz-^(u*-Nn!f34+fH+{v(k4i%dn8j+S=Nx^5Pv# zN!>weL4JOp*C}A<4Y2oQmH^8yE-s!Ou(q?qo?eDM_UUU6^q*i6YUi5+WysEE0>ht& zE{{;)Ip+k>OtYt938bve6jjyL)gxgNkcxoEZ3JpZqqzZU}@ZRDnKTxhvUi!+On!p7a7TirUA=2V9Aaot<5bUywXphzClxVT_17 z4kYmrf}z5k8opFV5h81E&k1U{==%ElRfj6L*|pBHU$81sx{pDhHKd@gJ4|fpj5Re) z)Z*m2bo<2*L>chg4QoUss87)s>ONe#13rO~Qo!kT?MBZ@5QrjR1h`a6;d6tJPjAA0 z`>M5(laqt}#YIL&y2QZ1XIao?z*SdBNGut_!#@;d!LI>pF$0H8#7pHX?otpk5PN=` z#zS@dPI2|FBD0KWLax+Z%H7fMTlf3>`w>ZjTLT;a4nAspC31}&{E!WBm#K+~Phs3{ z^I_~URGLX{0-gBOoknopr6=!z{wXObrI?(4N9M8a;IcL&%^lS$FfX;K8+HG45uz{& z?NazP4VF~EhQNFr-Q1q=S)-PaPF#of@L*v{2{!p1HNS%_Si%$KJwZq~Pn|jim4Hi_-}7|G@<<HNuWccoLEI<>j!{jslf9qYJ zk~IQ;1$^g~%a>DQV;|bv+ZWS4bbs|Vdm8i}j8z_QvR!=Mxi3w3rj3onEa6+JW z`SH9_;`p*42sqNAs!1EjJO4z;oygqdfH=A_IX|`P4jTiOfw8Hbo}O-6v}L$;>-}K2 zX4h5vq)YSiwqUyFf~Wop0AnR{zY)@8({We3R#CIcPqJJ3d5@pYcl6~qd9eirFF{JI ztS_M~4+$r-GY(aG1~l)Z+|22fa;Bj^kJ$Lh^rhXS zoY!6&YWFuR>wnms_R0V9UngnOB(eEL0fZt+*)U?d(eW2L`kf7(lF7E}RBH6%-c6{qyF2t)i|(jCI9Of^7>Y zYnq9^K5TjHowI9oFPy%Nnj6#DpwZE*cs#=8Ks+i_&6ei$`Ks#CPodV=(?c@=tQlDJmvIwK zTPBbY78YK}0P!HSsoP`!M9^;?ZEcng;^(#Zl!Kv~R9%o=#Kqma&i^LwHnHc|Y*$@u zfjQ~Xg@)|TPfwCP$M!3hI>88O@*8{mE zRm9SM{`2?-ctOzk^o$IIamah819Al(3c?0ZMeyTD3B83WAHQq0_YpVA^6DMHc}b(= z#-#4ZdVc(1NgZl`lXo2+=D<|K^Ss+*0M}5Y2?o=euq>Ncd5z$abqB3_5v!f&^jTH_ zm$j-|r)>D7&m?je4eeb(Y20gUZSCQnj!XbzaKO~sqr8F9S=Ro5k4E?frw7;d|ky8i5_Um3H^h)Zr_|XewK??vJ%UL&yCtU7Mv~sX!ocCHX~2FZKpz#ILZ&z z5zWn-oSC^$X!C&FJq|)k6%59DF?;$`PxF@q`y7L+Wwt|8+t`TRq|j%gR)4=8So9?b zE*Q=jpOC4RGI(HS+5Gq!7lrT8LZ~`;yu9_wn|{w#^Xim~Dk!D=Q;|+?uFTsWg}5Hh^@kX&uG|#q%T&>1WhJ3thnHDq3_*TGsnt zFL-Sbb>q{=`tJv!HP~k9VBP5idjig@oSZq+y6yR!%MY_BQ9_sScK+NCzcjZ)0bLVA zTM!q@%`hcv`+z9reIJDd&D|xt0-M|qiG}69$DR}Q?265gv``E+ck)w74sP}+Nu=?7 zs5LcsnOc}nLXqVrHc#NA^fhmIP|?itjcj`bmw}60-DoMfeZfOBQX%6O?*MrB5eFzL z1+@Lu;X9w-FvKXQ%C@6eSBCtY8?i4vs~4N+N8ZcyexjL&rdi)#EME-LwWzjxBoK9p zdj5{s{4zV?nB+0*wCh=#oQM0<5ZQ?vo)i#q<2&AvNAhj6sEZm=2l`dB zCFo6*M&XPz#$ZwVdvmk*5N4xw^8SN{r*W3ot0Y~$T`8VR_l#9+r-c2b;uRbxb*H$( z)X}j5vEQMgEWl=ifEsbuy!d8TIW$@z(PpGM?Xly=g0zZ?O3fkynLM9rx;@8WFR3{7 
zF4|&gOM9Z}FNrY8D8;)rkt!~a*i>PZ4o|*m|H>1dU!$3%v^EdeQ$aytp=m*zLwIEk z3tA|oaJ%z6h3C^ky*4UJZcFW24$yS4BXAeqi>5{rZwn<{3r*K_t460AsA~0lcBxp~ z-yDy;rd2IXG>eY<=d`qcYR|$^5G1_^(+hrhB z#1zPJ}nr${^st5{&P%=Pw3^6QYk9k%VAB%bSIm{T`R-F!RsSwQYeRU3Xl1+5>-VdwIm+HyAjU~ReejMb z>u1=3hw8fEL$x{dzJ_wE&=C6#EJ|z*!qGVKD4rt8yjAUIjgUyglR#wV2|b2NaqC<{ zp?aG}&A%>I&sdge8a>t3(Mbd34M4{E;|GY2P?GO1n~u(CZf;f_+k@AHZac+ju-r-M zOj{YpW~8P*2YiyZ8GD8b@;43LM?VH5Zv)=|i!PSX78COV^}ghrX$&A9&H%v%Xx59? zu>ie;9Jl$k-%L0Ki#u8g0dQviU34q#S0A@Qh=a345Fmj^9khZIH*$cp;dTsAN}xX6 z^5Eg&*{LiJW5YnPAp3crFe-`?CjdWc;T-8A(~I;oZ?psK z2A|C52EK`j$gRrZR-ZnFZH0T~3nxgs9T7yEVaKCWpBq#QL5d7-f89Y2L`2@4A{Kxu zW8N}B-BEftVM0ksxt}d&Fl7p+`=RCCtXYXU3MFw0h|nD!M)|4fX@<+^(jZ~{tnn*R z#0YK-muv7L7lso&sGpY$T0i(Dg8MV)$^@5!a4|&$9(B&q)Uf-is+rKU`(q$5pi=BG z!?{2)49J&v?~X$%MVS6GG$MjrCBXkCP*k;PdL#^1mX@+m#sOX7j-sQEAOGqnJA%{}W5%s{ zHO5C9^*w;_!c6jX;RLN==D@f>uNJk&k{Hy?zX zU$m1&*h2&-2L#+o=@eNyjmo_OCh%TYf)=tcK>TM1v~_jqA=v|9m?;LtlEx*gSt<@vgc18*BfVqB8nS-F?gYx41nBGwVMTV;C5TC=fk{YzxhJHnr?+X}d{6VirPC-D zWzakpZC#lN*v)}Q4?rBOtN{q^SbHY`=qmHMlBUUA*_~krH%gnUQ;GF!hbwRv<0}d><$y6rF%Rxy#EN;R8oi6Wo6vUxeGBP$|BzNCA01 zFgv6m6dnZGBpmx6DqqVRN&S$||0iqz|F;y-kSnf!HwXl=8}6`4Qc(wHNjmU_*V;8G zMIl-~H3E(R0Y#re;wBo4In8ftzxF)tk7dE>Y7#e}|vM?@fG@>5E#)MOB=P2XOG^)e>9-S>3GiI9oRjt+&nq9u3Y zid)A}KMUif&aO$vKWpnTI&5uGwzAUMdotaM8#@zh(8qbzsGYrFQlZNBQ0INm!3=uH z+P+Mbxl#mEbuE`}foA)o^U;AjmQ{g`)H%W>+=d`iXJx51Hbse(xijC4xw~V)h|Nqn zPI+VTNAe*$X?M9)o|P_S{-c?(lv;0gEyikR;+8n5#r@0lZ=VhdY^StzWQwW}>OU!| zQ5IWLN>3U=XA9%1Qx|P)99G`iuku1WxYss8rV%f*Xqgk`^N3Sqq*SW!%Mp4!iMG%9 zS_2fb1ht)~x|~(ItfBZ!q-jf~X%;l$BUYVjC_>^M3`a*3b=t{r! z%xp;McZsa>n?Vb1M?f=n-W zr=yK51@PO|E_+ zHqxWqN*gOaq6VwpwW%Msp&Pav7pI5%e)*V4dI%gN<2W#yGAgzSe?5LWYqlU!Qp$!o z^M?su&&bdqj!%iu{+t+lkwe^)vjgP0_3klhHzac+zgc}ZaaiExoc*XYNzb@OF%W41 zGPGzqXTtSqqS*U=Yv*z0b?s4Iq z_wLu+KODeZ!?+an-?5-FQ!lPnw5xP(a(^b2)VZoW&Gev$dUiK@9lA0Hat$?Pp0OoU z6RE_!A{D~rT>VsXbyx}oIW zwxje~%sV~9(EFLXn&G|bS3RDJGgbd#~E5ca19g6#0^H>-!s5841e0TSlUFwtUq`oUbxJ2tKqf%hL!C zOW5*OA~F^XCZgyw&2-d$E3MOVD$=yhb;T)RDm$s6qk@D~u09wmIz@(A@+O+DAIP%* z(3aZw_u-Zlo-M&BB+n_kZ8h99Vdt}&kiwlMIiHXd)?zsD)<^|*Os?uJegBM&Pca^<`jx62*T3|Qqj1M z-oiW8cP2C0NX+h2XL8D;ZEoIObG9AIqq$Q!q{!1ELsN;pN4&%t+?ID+%0;W4+VN_! 
zvbPOqASr#cSthst!fs@uR9{h_BG2Me3R=!=%$28`f|??KS9LceI5NiVTS%+k&!1>o zIeV~iCNpL}kEVAuZIfqHRI8nJSL;;m)f!c@7$O zvUH;IH2i5r4pvrTbcYgzio&{h#Js}^=%NeuspfafeCEy+lQUuihIaIf#?Cj^pZB)u z`7+b0BNRldWZR}DvU(m>JIfLHI7!-!ZzpfdS9+7(NE5f5Eyyfjv1lhx*5rGYM5RZW zC({`(X)SM;#X6t-JG zjHqgU!)0{t!wH&_!Cn`>GmHKEsrTEIt!;LEPG#6p9uAg!t6`eGuGnEF&6)^|(kw&R zIr;2BhjDq+Ra=vnrkAUGS0jh>RPm{KBh8;Tve~92KVo!NV#Zw?=S|ItwzyEW0x$MY z3yGrl2uEeS-kD$z%k_e&yu``x9&YNRP(<=bUjWMtm`^GoV!1Q98S`3x=zN5=@$nLM zcJ=5S|M3b>2GtaHOpf&X^-1y`gKjH=RaaZTsjGCm0w>N_7m8+Vgm&r-hI1R)E>P@S zrOO#K;HpJ1SyT(*k*0D(iV1UyT#rn)*TbtjC&XK21D}{4yV(3tu@o7>dfS4$*kroI&;#~{k2?UCKNo$BugJfpl1$Mopgd$DP0et z11y_s_~hq})7=HWSKv%>bIN10oF|Kn*c7WXMWZRfQMJyja_o~^>TRnTUColmBMJ?g zekNN~On2#zo}#lXtQ9jVGX2!FMiY@zHr=B?T1JPa`_)08)vskgT6;RnKKl`^LibF+ z|1nMG44Li~iZ1>nq4U#&l@cit$GG=5f7qe*wM zctV)!RF`?`BLa`PD}|S5-g!vSm{c2?c7C}_M|IxV=|u*bcg11cSsz_-TRAnJyR9!Za9F8CCqPv)s78PljCr`AM=P>^W{xe@JTi%%k~${vj1mqG zBClN@j%!i5NA^zP92HNnSXa@hagNb6UMazESF>d0f@o{bnc{pY-!r7sMx}7;rA^@r zB)*al#pIeYkztlc*m z2Wyf>6LFR*w>&>L|I2)eF|S{(J&hE&60CgAX_VxY#P;*A4&9Xbkv6YAmU5W*H@3H~ z8fLBUzkk2&1o`sL+4KxM;-8Fs^10`G(pq`qmuuSz zJpF5AS_=1a)<$xlxrmAvMy5HUKYlQ}#@E3SBX~!f^v3$`+?epFctlno zHbc$S&gr-^SCuk;iZK52R7B2zX=YddGL^dYgIp2JzOmQ2&Fq)^W#?@MJ+X_~yk<{ThRx5!}6wq^J0-O~5ku3&}Voh~$3r{}+< znA4KplfZc)?gx*m>1JMAw|7~oC>$Z>bzUZd+%a^A!281+c#(vv&(eKq;~&9sw8xCw%Wu-FOH(B^`64%Gn-~vi#Qd+BdM=llnR%*k zesh+e^=xsKxIOr{VAgkDxgbv=4&OU5djXl9E#zHa8sMFg+PR{Ze$y5r(WzNwE8C9) z%r)%l2AK-`xwvG)^x^Ov?)_Cssuy(lUwF37dUoY4m!`(>943X9_bL+t-*5BWYF!Q) z;SsOOb1^q^gO}c*PNzXbp_m}YtSFN%g11=?VbB`c(Ur?+%x_v>Qn0-saEw7E^%+hn zzuuh(_`BKSll((<4lyU`nZry(4Bw}lQ7-T( zz0mQaj2-WPsp1vAYEDbe1&7_339h8!l=^-IRLegyQLd8by+__9>#XBB6=CxPtXfaW zGJk`z{~+9*PBn#+mvLK8q-aWwr8I?5P%=Zdg|VA1xZ>=Y%YSVZnLm%1@qUxv7p`VI zHt@5Rv43#b%m`lihIjZcD2$T~9o9P*eQzeH3Q}I{#In`&cIY-f?p93JI1{asOe={a z$k(6j+{^iz<2q4e>NiCqxZ38lI30=4rhLW}{=GVz z{$_wtVV~0ty(Gh>P*-K*F@GZxgI8hOYEmjx=y)ue*DdLZy60FhcBJA-MJn=c__%lo0*68Z_RC zw{UI%Yj@plm;KOBxajqpBB~S2SwBohdT+X>Nq#OTP+s3!p`?hS4d48Ejj^KFl4J2$ z(bvV9UK``^*L`^$pBr|z>VFO>cl}`6uJXSud;Ra4BuSi~mf({Tn(Zlr?YX3WoBjR% zjytnISj!i`$nbL*71MhS$v9TcZS9mTm`!8%x*O(s?l_w#;sLlzgiDtAgb*5n&;;G>yuCceW~Y?DKOXAyt;K zICmdw`(JZEA0;tU8yQZoTjS|4}X$(hwf-kp4`Tj^_Dmb>=<1X3mVl& zQ*0mD#T`cpO40Rms@y-sgz0BhM~;|bo84Zn9B@X&-?_;nmKC}?FZ;r!+d(#FZGWSV z>+tihaVg7ZdE>HdlIobx0YvW#HxyFh4};u~C_iZ3z+?7aH16te0Bb z3f%lSLoTrOm7DYGRUFgpE(2zPY_jDNEeo3#>zYeVUK#!UXzwKUXX`TehV3iM4s2N) za;m6=1r46j@mTCHM5j9Ue>f0Yd{kBb<73&tzC|_r#rOPe`eKDzv%KpAGS=~{sV=5_ zx?I;>odmP(Zr34N3qFlkeDwa1QsZ#bOO)Mc zV8`;;j}<=riZRomV}tNWR1*U=#rHVeoKn_hGX7^IHK{sU(^6F2L(O3-O_GB)+Xa4^ z8&63p8t@GT<-e+Y3j$mAgR-3&imBSd?)hd$>(NXDjvo1Cf!tQ{!$pEY`yp2A-}Z4!*`hd?baj~~ zzAl+gKl1bGvJ;GrROE+^yEejI+~Hm&B+2Ldr`@R;PlBY+8u2I(s6ho2qX< za5!VRC%;+J9A&EAo~1j;?{~f|@RDN%?#241F6$w`-}|behAO`VoC3AZT|H)>$#3M& zL=Drgvi9fSQV7`IX3klgoHhAgxhh{YUn)0Jp1;AnH}=AJK{+kROH{sOee%bEo~47D z{khtP9VOi|ky?n2i+Hdg|X|Y<2CsbPwoJ8fe^91BAhwN%v>a^BtVilXw zCtFtwMoyxBxn_4N?S5PJiro(p zv)Hy}->53PZ{>cXw(~FKa5cx{z8jMxZCH<(95h$*m6n1QeKT^;TNNCyFH)jr9wRMm zeqH(z7V|N$v*mSZvHkp)iu)Tjw5KJheB5>9kG(!tN^z_Ww{P1#qN-S-ytBAn>Vogq zH(q&o5%ue3yJx-GbVAh*Q=5{ zvC0x!C=E)lSyWqh*Qj{j^y7-A;F~)F;a*`{MFeJ-fO9ue@4Xa3B$>61--y#RVvPHO@HRIJCGt$-Y4&4{cmE6RMO_8} literal 0 HcmV?d00001 diff --git a/docs/source/nlp/text_normalization/wfst/images/task_overview.png b/docs/source/nlp/text_normalization/wfst/images/task_overview.png new file mode 100644 index 0000000000000000000000000000000000000000..98aefe0364fa333fdd279f08d6b1346d1e7cf6e3 GIT binary patch literal 49787 zcmdqIWmFv97A;C3K!S$go`euQxNFeh?(Xj1Sb*RGg1fuB1b266+}*8##(9h7QKYHjZYtPG@lKd@wK{ 
zV8n&LDY>N{uXs2qLA{_4P_56S`>^j-LtsT;TM66!GK!&4uxbe2oZb9b@V!bkJUMkI zc$XzGmm=_%r%Jiu0?sA*N6jK$fwWxeV${B&ST^dn+%Gu8ffDtdZNR_{`G>r<(!g=R3hSAOF1xmNy_idiCG$ zz^BI6@8`7tKK}QJfs*+DLpv7wK`{RvfX&^Di>TQu62C^@({z5*(x4AEitl-l|1&;s zVD1rG9wGGgmv`PEib^3M%p4skq z>>W4!ED_i5daR{D|7`N(Pdw%SxyJwHj+-iyAMZA2uXUSK+@2bIJ~(RK12^(|dRW$7b;r*u^Eb3_tE`_9K2ZzKSI9-%b5JC0Md?$Inzi!}_01^++Gbqokw$0w(DH zI5*GFw3}hN;nM=6IX$4b4ArGX*E zlRYOWC`#HQ>`p8~Ko&TjO}Wwk=g8Q+3e?X$;<|84{r=f6o3dxi9;Lf+|0D=@KkETS zS)hN*o(K(*ii&F9s(xGw#%ubZhC}X#;MUL+YDTCz+ zc1BO~QPY>aPH@X~B981?w7K_~x)~T4g3TGRls~>hwQJlv4<{)8M@((9WtkIrw`b@v z1m~M4w0FNjRhE13Y_mkGv4v;1K3~Q??Iyj3GpQsB4SGL^k7w58VzUEL?;YpOB0+N^ zZp_-k{?Fs{PH=hANt*%-2?H&tLejW8-(2_;hG5o2!A)2j@p5gaBL|77JT{Dz&|dD| zHvF8=%o7HK)`itG?AMC0NDscY|D&xYlx_j}SYb-f*8QgRHyJ@+KDQ79g9Su+k2B=$ zEGn^UhqP@WuCl7UGq&{HKI-)s8{|xUjZJ7G4q=M#KT_F88%`lQHiN383>q|PF1&D2 z+oj^Y6gIbrqYL3oB6+eavHOF_E6j+X5OYQrZOPiTH~rmsf!|a!kNkG_`%XM&1^zLu z_9sAiY_ujQE0e!_qcwMUSmiX0FkYPh#c!|8j?`D}ha9c2ZeyezkC6&5Apsu4JA46F zrPUKpXxGkuM!5Q?|D+Kh+EvFGP(DSyiTb`BSiy}Gh7lqx>ioVtrB@oChcM9B#Ei1o zFPqu~e5u##t`+83`hvHzVIx@e4uiGp?f6@U4|vGQP|}tvn|&5n)<|$So^xB_nV-(w zKjtI2q~M6FQ_oJSOILfYVo5mA@FWm^T%I*~ztfX5Icp=t4d`mf;cm^f*!|mH!Jh^n z7h9}ruj(go56Ip}qR27@(_pnwb6E(gz9h0%OB~Bh1W_ZGq)8!#^v8 z_1`OQ#$y4)YZev`>D{vb6g6PYKDmyHoc<)wZ*Oh%B(bTH>Y~ftsrFldML;Vo#kU$h z+Dcrl%Aiw_gT!Pl$;)&8>0`!~m!NQXCLt*MV}tKEpIlNA9c^CSU!K>wO>{)J}kS;Kh0@%xbIJ73CBSw zGCaF%eI7LHZPy~k_XsbT0*lkh-P&N2ZEl?TFgchvNydxI--Aag)!Chtw0kixfVj?d z&Wt`DL>&Jo83;_H&caic?Ti$dNt7Dxf_3V3CtI!&aI287m z&sfDRrt*3}Vg%f|&0cfJ^ZaQhq4eB)@FqeZ{I?c;JiI_(U^r^=bb2iVU;LLG_T3+9 z1vJ>Nda47TefHOSKsNq+|BW}nWfCXt_T5V8Op)U8T01`m&zgNd@$HkhhK9y`jp;ac z!1$z@lhhZE7M*g-^U?!?leQb-cRW+^xHza2PdRoOtNj zW=Q(1QlWJ4vOPjU75=Ms^nv>8mreUuuJ6IwWT$jqHHYZX7$ZqRFnHsKD??9sA%>Dr zFYYz)G>r@+l(fK2!UTDsIS#po3h@S=Q|okHFqFZS?J^;Z@4nvp273YX6y<%Bwjib` zhRN64-+vkzQi#Fjm4+ACxq8!zYc-sexqy3}bwKc3Na`~asgDPY==dLNsc>=`;Wr=o z!mzZOkIc-tp$cbNphKt!JqVp~Xz_6VT$oexvbs`@cEqRHyvNnKRb}jPkQ0j++)>D* zDP~#dnVniK!l_1!lbXkxQ?tAzT^TZj=rEA1DGHv%Sbla{Ew?~sl`YGD^j+Yf!%0QA z>TBQl{1Qh=LrSd#7~T9(C0IzF|tu zmK{l&U$ehhENno$TF?G#&Tc)Y9SXtdT=t?M*ma1ChD2+2Vd-ePoKRA%gmQTJ6OHTJ zxMl=IEGq+{3#;lc;|0g2qsxmmpF+kXNObOowH)%Lo0~G0#oY61(!q`um(SDrQ8iZ? 
z98O~wWD?^(wmU&=Zcci_;?%OZw!|2{^3F%dfMw`_v(#`p8 zLr+gnL_BW4>+9=<^;*m(KOrF@0i)%4H7Q2K=jqnNo1v^MqHLRi(=^BK#>T?+NK9EQ z=vtWaWO}132k*fMg#X+o(Fwl<13^!Yo80DZE=Y*(OCapYXUq5M5wuv!odvi1NG_9v zr+NI#k^F}gM96E^F~}q*+!72ppR+|P^}dP8UbT~0WMNDj3U6ZEcRyVPJxrpe+3}1d zb>M4&G)G`o=fq_TgZxkZ8~ttgoP2`$4^3Z^o`k`az%w?oaoX*K*bL-_3CzO{wHHR_ z(g-)s3FjYrv;!B~jv`@^T;yeOLdg;jQ zz;U{^%-bow!ktF_#eo$pi?e|j(7RHv;VK6$C$zQXb@o_JXn#6s(LQl6eVoK8&Y=4^ z?R}TBhDCIz)Y2Gmn4qf2@WpT}_jZYJ;qp*a!Zf`1zM`j$Wp*jtzsuIzXt?Bvvgrhk z$X^svXEvU*nNd>Y<;Y9EXImlLY=hzs1uOc@gf*1;O4BQ)P@HHG7Ore{EzJq8)DRZ6 zv+^^AjTa^jXXG8uRn&Mrq#RqgrMRxTr?@_!*sAFve&1+2YqQaDnH?`L?nkUZOzet2 zv)P?{X-hldUfWLVG`4zKeaeNI-@9}JFYVE7Nw40Xw(edF&A1%4H>4txXb#WbP|rYh_MG!+{{9k-yWT%3;Fmw_hr(_u9QjzOhtJ zRZbL^CX(Zqk&ggOo;Dyl;Po3m+Dt#^ZSlGHx^%oecnbyctJ$fAiPjK@iRtKW>=_nv z4ek`?;m7GHan;H&J6b6V2kJA|wsF5f5I<>*?gPEh#eXsXkqcx#N z$3%5Mmp}b-{Z#_qg-dPZaD{99ElC9n_Sj8EjP+%+>tmv1dZy zsH(_yrjf9nd9^@2o6jD7J-~&ubJt{#Q@)Y6*E-|0Q*&Q+TtyQ8j=gM>do?^O$64vm zWs%#hge=->?hyui_y~4f%M~l`Q9WBu4lyrP1sk_JbrQESdran_aElF;3|H zpO-CUDQmpv)+eyDXT$wFEL**Y3KG-Z1s}3;^$sue@=U7@YMiJ2Lz1G#=fE#GEy=&< z7*I4cH5VFeNqq&@{f<&)C?w-)vnndk6NYx4EG~No1DiNUx`{kF!UY`l_A{TUKt^`X3^5 z3XuSi;mBo2kg-XIhYV}SWtV&^_qPS9aExE@oEx0q7)W|{oRfwxYbS9}w?;8-fdpTB zRX1^JY5IYw14x)$Pie;h3VgQX7rT#`AUxaw;t~M(Y_^1}!3kSW{gcjVK#LfxezF%A z;x7~cES4=qEa{a1NC`4d%}OXve~#Pj#M8&7atLs^;qh~{;BtBFO3HjAMMAXLWe^Pm zcgez4*jFUgSc!el*4IbsFT&(?bU8H~{eE#)k2{h)kjkGK>?&-%YaStV^eVqI_+||; zHoveA^*2{-&Fu2?v%7uY*%0Kz!I0DvItI{;AE7!B-zrHrtmv=W8o?i5Uz919Vq7-Nz+RjW?T6%(Ux^V06uPP=81!r~Ls2)Qzhm%APs z6@eU8TTbp~V`;hd1YPStYOO4ox;a@25zWcX&6QJA3m!GeDJcnCU0ubX(^Lj8nfP_e zuRWo5!yr@I2MEv#*cP&52om76cpX}wri0iw-0->?8eJD#0X!pKPacyVE%TXV16tND z1_ldP9?SFG=6C~XsSro{^%M?mVR9(gKw8SPfRL-x?RO)NY;CL=v01M{)$in3iF`6h z>V-v9V6TC=lBRCiwC}VL7t^2&X&U;rPOoH81)Q>?xU|II=}Heqg!30#37SAnn|_{_ z8cg!DNQ_}DZjzAHMcz{E1AxCu7u?{2WXk6aH+{#dzU(O=>=>4d7RdVmhaR2)4#yrA0RWV8E~v}O7}BIQJyMab|2 zlYmsXJpcgpXur!=1_216;HiM~gkM@X+z!~Ah?6tA6iBhzY3>U9x$7*e{*%`RUkA!~ zKX)Ida!!b~gTeZ1SDBWWbUAdz>3p5JLzj;|arP(tKUB?4o-;-ttjB1*yT4m~Q zmaTaqXR~qbZmM zsZPFkU?e{~v-X@APt;nGV!?cat?s64#(4k1bOHS$X;WTaz{rRciRbyZe3`VNU--=X zCntG8NKq-~YuP=7g-gykqjcke$B+#5XK2#NEPfUef8=Z&!02UM~5!SEdWd;{r<9`EC-lgqORxftpC0A$Aey1#?r7tJZY6&?aW zIpMSs0hkxQ=;DMp$`|1k#RtS}OnMBk9Q&$XOwCVzmkJ}N_6;Iwx7l}sZvw8q`}61Q zXDb!L(BjYcCQGzzv}yn=`%)kyI?bwZyo;d(9mctlDG*kq#{{98@=0kMjKH z;^sg``Cx+3b@J?Dph3%6wKV}TY*@s6D)Q)V1^S|;gA1s5`v4%^wTA014RxI-O1(<` zjDG$cu%J5h8{shXBMrphg6)1%_r*0W01~n~Rh7-uE*MgX9IM#&lheBXtY` z5~B+d8e}URs)ucvUcpRJVb5U)>%>-hFgVJR2GuF61=k#e7-Nl&b{^EOj|}O%Y%t9} zHj>OO@V;3prc62NF{siLPaZ8+J)FNX0*HudOw2}4Qk2j)bMosnGQ9oCQQDLg)|S!? 
zZ6kH^qe9MN0bS&4{*v65+#d@uqer_wI|fG=Q|E%5!$^C~+!V;BftaQ$Xb@73-u#vB zCs1oMyIegbVnMH8@8;*dQ}S3|`MCYOl($e=BNmsEHahxpChS`VVQ19UrUKcHp^-JfozwKCK=%p@jmqX%dtjyofJ zk3f_yOJ0-ss7;~c7zJ{{VTx6m>x8Ww#P?*gs=ZsElCjmp^yi21T4A=k8F!%#fp@iu zo0K}|h)-tf5W9vb!!uC%BY!TEoHYyKN&*``wZJ{YeQ6G;JesF-tYY zrk{Q-M2@ZaGS#~{@;6GTbe%P`=ciwWNC`kAGD8j8MtE~g+Vu6Mo{sAAzi1sULn%ER zm{Cp7mMoZ4NC;MS!_tehb^};rvC-Q|Qq@Pk(wa`MhK&~HtuMCuDx;&Wu>r$c3d$mx zi}SW0JP_bq6aY`}&<>7&96tC=(-3@_!GbDVxL;mSJ>z+zMk^EF%Ea>pdLuFPeu+Iy znH#a9F@GyGVRw$2%E#<^eakUAjydIyhRt#NxW9m{=AhJ`?yd;Ag>L4_2=(vyLE?%x z`t1T9iaczwk4WeYo>3sICqP`?WFr+XmRmuZ8k7}o_a(>LX*ieaX5k8O-{| z-P7>9j2#A@cM}MQIz>ic)xfz7R@b*B-u4$E!{zR!84fKTC$4d^ybMdJ>!W{zGS!AJ8iIDGD;UyEvSZ5uEFvV~0txF9VJPYkuwaxtemC=+Av9Uz-qnmkgBHrnRj6Qi^ zTE+9ovwHq&vUxZ+BPJ8Q)!TW`waHhR9DX~M zl{4G*%w#qei-CJMr1#*nWuGTf=qMQPNu-i%|}Bk^yN1q3j(rnf~OiChrb!cf;!N%|S-;@>3Dsf{pzEhM!nj&`>} zdMX3=hTf~@K0z;zw=V#<02I8pJ+Ocn)taSap=n_bsm(ev5jl^GuP$jCdPqB~+3}~V zy^2_cJ^(@|ZCYzAlxxj!?Z69Qx@W1NpmaG|1I@Ja)DQ1a6R}4o0e;qd>}!qJvI9ak zjmAs?2Z~()7unwjgi-^IUx420QUdVz)cv#*v?g&`iGG`^95V?6cL9Sx_H<(aDRB}K zlgBPqp#d~55yg&<<+e!}0Fg96LbkEese{~&1DQ4n zoU9}qeVo9_^e`pm0s|4zNO9)PEe6|Hmmh}){lm&ca*mYVPOO$~n&0H>6Lzf$DPML? zzCPq_x|b~2w*(}_Cl42U*^{B#P`HOlQu(yRK1$J#a@HkkISIym6UHO|VjHT^n__=o=#0eY0c&+oR_ zd3(wljrA1;mkHs*CRp;%qhKYT-~LcN;y4`t5b8QI4ZG1jPbHR0DbvDdYvPhK7r9#}I&N?EZ*9bK+Dc`# z27`)qa!mnn#3Vd)&ZX(gJ%Mo|v=0Y6tJB0-FA(u2Zyi54J9hS|I7PwUX~Km^NJ60} zM@l>}=DJ!VflqrPG{oUEKB>~Z!>ElQGGU&t#R;II7kkd(bON+r#JU=i*D0txkPh;Oaf}-wLKJf@--GwI< z@}jwi4XZzGYYd$^u7er^og+*0%g>JEC|O>N@I~^D%u|)VIoAYzKYLY78n9@ZAb)k8 zESVwq=S^N-=+K^pS=D%f(GGzMe|%(4O-OQqgnEOQ-OKP-eJ^b|Lef%kEZ%>60ov>= zjp%h{Pg1T+>3GhXO8Ckfi+r+!vny<~scQV_Jeo%Ka&}<2y_F=!7Aif^F z2LOtct-hExdGCRHsrL?DaXb5M+fKPD7_PSzR2G&^%DR5(w!io%{f;h0y{_@+kn3oF z!XE=deO-Q63k$*f&U-#}&D^8>};?DsoGkop0RC7TVIY1>nL>(~8Em zYU7C?T+T-}H*8&eX+GR@jmY-IqsD_m$Rh%O3<*nK_Jm5G&3S_m`aoKn zPrKb#OOqYYIa`wz{zuH_Rqx1Gk!RTr#OC{+tRN4MgVWPKn+9g*L#53N4)1m!VJI_5dNf%NIo7%M`<4@0c6YU|$`}#uyM2wRH%!@i%=-x3Y&&}8eZ53>x}>%dmk10m&JIC@$0;lV2to-x8R ztMz9Z&bniMnWqXLk&{y|BCb^#=nqnbK1S~@rTmf9!8Avl%I%kDAF4F=#);Xo(&rog zs)o9>^a=9#B(?~u7*hw+)s^L!2?>&iFpr8$l&8_rt!v0BA}ORJ;Zj^yk{3F3{GN@? 
zsp{f*`1nt$=QRusy_y1YO1rYSKYIeWLbb-_VqGljm`CEdJh?amHvY+g7kfOxZu{=B z^MS*D{uPt@%`#d&4^K<2DamM-nLN34^O5A4mm>e23r}&~l=G?qo zn$C>((KNntw@S;xdo%mFPD4vqb94dqd+ZA943%+9?c;L1JgyhkK|~LCOLtVr)_X5P z$9NTmr<|@0J^O|B3dX36&Vya#ooFiYopgdV(h+lSxZr5rJ1gA)huiC!PLvV zu_pe07k0>puSKNQR;k)1C2Ry6tmM<~W+xR;>TOTw%sn@8{s=lGAw`MgvljNpeu@y; zDScTT+CV>xj8#n47QR51E`2#Qk${#GKNHlfd|NfNRBLtHkG7I%QbuN0K zh8WGk(Q)9uwr)p7uAw>BihH6d$ygTkD@5XEdW?FOM?Gj*XQZ!869G;Y263UrVX&>s zXmDdB>v+kMN}a*2Up%1n4#b#pGdo?`im_8Q)$@Fcw~R}KOg(>jzc4;%5qj(%+q#== z4qzTfcXe{)~OU_^6R60|aeBRuM1G$2y zerrp3ed0mFLsqT6SkSOgovk{!vzUFX@8Y52aMSA05jK{-RzG`d zSu~(2ddhxeUQ|)Ab9On{%@bjS*1AiK#xy;?0^_b*l6iag#;xb)nBKO%_pa*K_?P<_^D88Tc4 zHzh-1fVJJV)i^y?KBY*j1^-8%uF7QTHe^JxWRUdH@so!>)lr5;Cex$?Rl=s&7-^k< zOOWlsE{j1d5&yzB&3=56M+H7AszF2ERL!wv+qRCE`?HEmf7eF_yWl2V6?y0;$F8?K zzOheLbbtKXr;H~Qf3kk925H%i({yh@k7<1(0X^^%aWBY#l4F*Wrf;Va+Xpu3(KGUh zhepZ~yBPp6$`~*Z=H}%A4w+D<%b3Ilp?#FOk8ZQ5X@Ibo(lA8se-A^P z<~smG!9xM81%SK^lo*y{ywI&z>xIut@l8lgwCD6I>q3f4OnPfb`Yo!y_>DH&s5ENO zsQ{&Mpuo4`mMA6m1(0UEU5{O9Q=5KsGo7vTM?O(Vr%9)xrHV4M0G_g3@&fz0p5{b8 z8!kY*#0*9_7j5vf zN*4euT*p=`;=SC;9O)M6brlP0q7DtP>1z3KpSQJdJiQ5(?0E`5nFFc_^ zMF)zLsJxSVTtw(O_{j_Hi_;R-tVw{011OwCxqZgCX?R#nO6J2Jop{qL^xE*-{U@*7 zc?Ce>iY^-jNYxh6*89%tUW0G)*xALL@&-!b(ebH)qZVYn@m&H&Fu)3nNnt-)%7r+> zx74IAZrv$Z&>kaR6a!EkfcF8TQk%EjtD%BV6}fCHDW+5V=UX6*a{TLv z=wJ7A7eDL_zmT>+>q1jV`|93E_#g(YdW0qWtPP>N+U)GmMM5r12-O@|W7L;>*xD}% zSGnyHRdp-&+5#X{Z7+P6XKhDAxV7!c#ZO2^338xsai(9$BjbKQ44G)ut4y9ROD3T2 z?P4qZMwPZfKjm9TqIn);EC|k4pO{$`Q6*)tyru5<62USObessIc+)*7%7$H`n$hq9 zF)2x102iYI@qOJe8dAIbx94$c^HoKW(JU}eOyTaeLc23!gxDeFpL8HSLk7mje?IB7 zc%|2n4g_zH3`xJ1Pl{BMCwBUwAPb(wsoayl509qN+6*8u zB}LDF1i$E5l!1_@!6LcCh%?q^3_bEFM(QGJaT<<$xk#i^H1O3Dn!&3eU;}yKfL2B#wf* zmGgX)t-sn~DO{fad8K2bj{zUSMR3D1fmhmiJXn0(_385Xng!}UQoJ7JNsyu=+8Ziy zsOu>Ipuu~T5RZ0-9%NC-r_poBUVf(aHYp5gpl)vc)N$+2z;#YFdgKt}Cbdpjju%Qx zYKa@Vozd=Z?%K0ONYQdV#xc^hKc>c?2oL>@bDS~H?L1_ic0sL?aov)fYa@#G<$KMx zmVFWuTH1AF$lkB(N*iJ-?#ZAU_ zgt|Kfe(-orKY69RYl0@+V8DmU}q> zW%xd}zLg%C6werbzjgh$5dPQjA;zfr-KfA6qN;mKvn!iEemt^Z#KCmi_+Z$_**7^q zIWW1J^vAW)tySP|FFMF+M3N7z#6 zv-e!*h*6|4teH$oF3b>gtUO48y@s1KV@#-&Gg-AglH2byMheBS3;S8_S@!Q-N%&F+9eKZ$Gpnb@f@vU}^ zm$3HxF5StQ$)40w&SUPH$b+r?X`uKI6fpi;Zn%f`D16dmR%0ITb97_kQ~R0=)^^5C zUhu_0k(Hlp;mguh_!90ot5em5XTE0#)%6+#4%?6N{L!yc)Rb=hc^UL!P$HjN3!im#C*8-Hkz?#z@{5Ea4CMaQfe0hN^wQIIo?$H$$>?WzNxhRUsQgxH`_72Kh8i$khhI|WSX zpu@t|_Vh>kSB(o*Wl`B;#eqvPg>*!>YOg%l{_LqDEHMuzAO8^2cKTDn*p#Xy@Jw6C z&(hcipM&R5LeP7%xH$5PetlpkJA-u^GBONpu*+OPM*fQH$UO$)MNxL9tYt}g(sC@= z)2dW<{9^pdrirU*oglxF>wy)t;fiT@(Y+FfiaPB__62-|4<}2%~uoY z@Z9BR$=AtTd@N*t`tB^8_^3r;2zeCgy6MD*nuI;_HJxUOJw5GbS0BuBYgU%W!g-!5 z+|mLURWxJSG;D!-R>vOXdB=ey@6}6+okZoH&?O!!&gd9_)yKEAG12|Q11nAVb2kD5Q8*{*gr2#w!$cagR4_(t}L$jW!?I1c+ej>Z|?-c$O>^F@?N z&+y2jl*N^LwETRec>?)2eri}U<<_2i8`%B2TmKmXwXK1+`Wxi}s4kT|SnHeduBpb5 zltf#5K=fH{jq4s;{gW7R`@`ARZBC-{#8G?Svk?7d-!M%g=d58XR#*VN&jm@_W3NXa z=yT3yT0Y#dS3^?i1%7Vs=ZlU6B)4LjQjaoxgIV+?ymwpFtKxmVvrZhT!18) z%{?z2%1bV9ZGrO4Ui<6#%1VQ`Y+HILhka)F=giyrG8f=G)+psBkbeN{PdDI+dkcLR zk|N!~D%6d8t>5WqVpjMKTeim^z<~L^!Nu}`Z|4hjco`0j{b=flZ6P942h|#tx8pXu zluViwP{;*RDrz|bfPPVg5#H;Y`Q&CSlGV*nfSEfvKQbb~0j2)CO;WF@5iCe#2yiZt zY?_2a{NV`av`*{UnoD(CVHtC$#R;Zl z1l_m%B$I!>q>=2i(c|SV^gxU+^@$?4{c3t5^P&fTg`}cmGBv;Nu{1yVc0@zae3E=q z??DTPE)jIMe)nGC)IGI7%%|`9#2ZB5a*p_dB+*aleuLmg#JkE?JWO|K-e<(sxPbtT6~2p)f&vGT94UBTn)B?3SUjkhtccs5FS&YiaSk zAI^WQkSto(78)i5v+$^XuYy~)RhqPOg(nmvYiykatvD44n2cfjfL|0YN$DtF0wj_f zR24~SvskpV^euUlbSsR##PB7(X9cyT;f~sses#C{H|>6s@Nsvc-eQE3f=72NIEtF| zP?6?|{dEJH1HYle)BKn_GA?Y=HZL)SgelLk*R3BajsUsp3oehhtTPZBkSLv%q+As2pQ)8PFc>2*=0di7 
zk*IO3@-OvxS#28&Z(Y7pqAIm#$=zJm`J`;$H%5CUPI8KBjaps=62S$94|#NCF54nPx5AJF~vI1P19GXlObs1in$%}M?x}OjhF$3Bu&7IJs9V6?%3$;cj3uG~eo8KnIvD67 zg#a#aq#xXUYk)Vzq5qb<(Qz|PFL4QA>*2NAECBTfZsdI=bAE4P9A_AS(7NY*S4=1d z-~B-gReHQ9G)PiR0d;i6uR-Hrp5LVW@J z5BYuX!8_@}LEIFpNrjbYfqt*EUxB(?-9DuY15S;t-Oqj(!T2*r1SGwNH!Cu1vb3L< zMqLPi1l47c`24-yy*W4tt9b$8^ZQ7>n)$z2@^4<`bY_Z~{sL$TkT0V`U?n66q)NSw zRdZ-`RCH(RTgQRHfsc5o3cs~Gn$ylMaov47eaqqg>KDLEV|HIS8BzQJ3@k;;o;K^N za+>N!ar&FEn}O7lgXBwZydT)2*QN?xb-BMA@cXZPtI2QyP$*DjWj>TjeQZqZS9x4e zZp*fju{~J`>L=8@?2)2XA%yIWZPQ4$k8Fn+K13Cm?y=o0p6>PLIo?gjl0WM0u{~zB zT0LlU)3je4?8WD_^4=?1^Fwc*p>SC)Jlc8R`?cjwQV%@-yQmHq|SYNZ%Vt z9-AZ8267Whz$YTk-A^=@L`4x;e+4G!OQa`&3Z*@CUJ&HWKuw98xEcdbN7Ti-+DZG6 z<@0T;_{Z(d8d}q@6`7tO)D&*ZBB0K*iA_>$86o#Lx@dVu!-*5~R&W z%DNT{0Ai#W&d~9}0qAtO{0c?OlB)cb8FqRAaGNTU2Q8h@Tk9PTNy~EkgCW#apN}3Q zRK2ouitPn~i(^PC4a$mQRA6+hO*Qp_{DR~{A3$v+%VV*!`~aQ2-kY6SM1*vV#Le8O zC^F&bCe-iMToU${zR19ZSIkaao|q1-XbZI(lGlaVuDaQ8Kb%jfW>j3@*=qv?(hY^E z4Ep0^1dmF)_s?qAqr&1Ms??b-09#k3GK2N>D6V z)Xxv3#EOYJAgJlh-m3p4G625F#;AFz1av>_(yV0J&;Vh{pN%B!aB8$b^6BI5170IB z*zt=4Z2MBSjS2i|PX&*g9rWMp$(apP?^w^r1SrBBa3<#$2H==UGr8wSc^9C7O%`xM z8n+no1WY4y{{LnPfC2(969CSKZIj*0D>zvI9QnSO-Jw6b&5};z9+H*!-}QCbzLO`D z8cqxZ8yK(Vx&x?%%ad42BBf`2%mO82SBBp(OoCPeLE+gb8;yPrb$m_rEK;i?}0KED+N}M zK>u+9fJfhFSxbfK;~}hc+F0Qb0$Hv`LPPo4=jN1`Z|HvP;pKsA)Oqydpp?w#jaZmh zUEd-|1YX0NTmDuVI8-rcuAA@1nX{o;nw7Y=39EO`Yh8-7XqNOiJ3(EAR=~U1(yVIE zH7esY4RyjT%^MKnR>=v!`Q2f}6tqU#jqK6MZ0 zHmG4J)6Y2%(=lTb7AiZm_)JPTw9^yka;&GeGqaJJHnt%kqc-c?aWIrDcf(|qn zqo`8HN#xP+Uk(}J1%|@)hV~j34^#}ufYIrR%2B!5_sMejvvtp&>uk!~)JLsHhg>1X z1iSdU78tU9*WO1D4zywczFnA5Z89&?Bl=^0e!{CzNSdyH{Dh{q}3#e9Wc zCyLu6&xSVka&K%pJnyK^MmI%AyN5LIZTq{% z9k3~{OoDp(GT)7A$B$@1X##c$(pKa0d?}6>0V)NRud+*d@g`|5`GuU*A^0lpwxnIb zHWP+E?b`AcDp2n&;eAh;RV%vZ`OjsKyDrvl4rMngC*Z>sNUi5LS)Yuwg70b=nie*v zq8SIAV6zSD={hSze>S{}qA&mU0z6vzCvRGUkO{6`T z3vUE}7IC)o=|8=NRH<5$>Xzl3sfD%GY9Wj5-6N`eom&(!b}r6nRwDGS7eAV1FV445 zYY7<7vyG;K9_h=9mvBpN2vIelhZbe` z*9Ou)m-umX794x|e!KI`)bPUi4;h{~?V9gUUEtTpwSvvZwhG7F z*`JNSTC0TrTB<%Co+Hh7CokE1=QUlbYwMR^#7R%L&v0Jnrq1Yg6rMV|&ou?L7yZq- zUJq9k*Yn31W0~BqY@MC!ic4fa!?r)HoLvp;J}dY(H2=;{(wR&w%+ENy$wm-2@%(~xTcb#tuO^bX5G@N&sJoQgOH9qlC(Rz{#c>G@=^ zZd$P*Uw?>2nu;9W^A$c*?Ah+{SuXpoqQt>lyL_th zoFO3fcRpGwb;izg>_Zjsza#07(@3jj{yJx-Dn}WG& zLYVy6scYrA65h$LOO?(HAUSg1_kS}*v)I&LIJu#NwJN-hFV9{NDM6tA0E3Jb%I)(c z6lgcPC|7GK&Uzk+;}!N@*tSNw){^q4D7lHZLu}Rot(98C%D$SJn*vpSZJB|%XsQFa zQ;E@xz{&7l-{KG?u_b$)z#SzsIJT0yb9zNcEwFI`Lx1~mOs~R&ubNITTW?;{LGxWz zzb9wN5rQSz16(0H#0<36rw06v{JhM3{Ps6Xg}PyAbDm2q#FxRcKlKRjI@OzJT<&U= zhx&;iJ#?q>t!46fsLW zngw+gX-~(eU`HpzKc9(b*3~7yO@XO=YXMR;%M1> zw^oy&+}%BugVi8ankGH@z3}+26l4}kTjCZA<IC9@4&u z>=czg!p8j2S}iBbHwRVan#FiSX3hE5rV6cXX;vtB4?Hp1Wm#h^w#6+#&M7zPXBzY~ zy~Tp(7A)?y6unThrTF}1q8d%;G*dW!T6vk-a$Aulp;EfIXJqn?@U&Oo4(|WQ-g|~M zm3?oZI2Hy39YsVzK&44ndPhO&5L)O(l$Jo~y;?y*K)SThTLMH%LI)L*PJmFPgOt#F zhrr#;IP?2(zueFF`JV@PG&yJQefD1EUGG|FpDDwhTSeHmo+Luha-IkYU%lN$utL>u z@8Qv{61-JkqmAgx7Lc(%IX>wNTo4K08P+fg)euoQVTY;UHK$o#W4WI4xai$_dY0PFoZPdwAmrrM%>IV zEL2YtL1+WX_4ck*QK{-b>z;16sP4i99$~-JR;EjTukUA6u}dM%xKW%%iwGhiC?5zl z>KoKZv-F{0P~=yBXZ=*XjjxNEVvDXea;8N{ zjn?NarT87T*mjRQ`U*WS*VdYyXvsr`tKqoWh-3_f6^&4(Ur7;zPm0X_qqB*E_Te%D z2P{<%bM8j>eO?ybbPFrO)R@tG)v`?xu#BQZk~X4{+aYMsAjb0cV?}h7uy7Qym7g5f$6U_d|azu z(0qh_f`?%jY?tzW*i^rx1^TT98Fvvw0Thj1H^vbCn-?!u_bp$VvKSSp&p-x8@{m6{~cC zx1*n1$P2`TBGE>o?;H8~&`k6L#-bI?5^bDBOHXwTV0K zT|SpK+kgw`kv>LN$=d-Lbd4&~#y3%0=E)h3w8$qc&};Xi#&_&=*Y7=<(o$!ZGUd#^ zwthEckA?E!)jWj*r{w7J(EOp^?ECPY$JJ89I(7w69Bff@>*FoK+MC&kA+F1@Xfe%# zcXZtqJ3@BtW38+SyD#HmyK^$(Yg#)&|eVbUzeaQAvQZPQ?xaeo8h8_JSRTtsy@L}Q1 
z0S%7r&+oU5{K7bvN7LN-laHGGOX-nAX|>LfXc;}sU@b|cU9A}_l)pYLd(=rA5N1%& z=-Qy&W?g8w3Y%=Q@jT2fNr=B8TE4aRU1d;;x8@E{BLnt;+|Gqg?2<&lNL&Zp%e88O zn7YSOH{0){ep$JL-(HhI?T(V3@%;MPnqSE?!{KDzrLa;d@RKOXK8)Tdt0d_<`gku| zJ@VJPth_`I5f9kwBEi;~<%JGC(z39&U$scVa6oJBZjm`U$hsv7XAsJlW>W(XNMV1Q zHFuF4dsM0FUB7jWk5Av{bpwLVqdHY93@$6BjLaB1il&7(xAZNJk=%y9y+svg&$*(@ zT-wf?s7e-WC`nY<4Gr#xOfTTt28|*XTc=FD9BLOrvghJ1AW%gE-?|gx$QdkqmQ`5`aXfid`d~3yb_G&T`GZeCd;>vd6Xhi@ z>5+^u$ycvV9Meh4Iy3cSd#vO9Vev%@4}Y0dg)sVT3L5r%H>f-gOacPxhK7d9yf>{$ zla~WEA3b_mRPl>)&|~1_-SWlsQnxFKeFBpkKYNN34P>;q%?gA#SMC62|eQ-`$$AKho*lVzwrYL zppe(7w6xNc;&==d4iG@+0Zw0-D1ObgL6_fUo)=O>Iha!j(KyHg>U)X|81+Z)47kjA z9s#2@5wmDbpTpG<8aIt-c+4ydi!ea%X(usv(H-Pq@Vj0_Q%JYr;{oCBZ>Dav+Nb)xoUgN4AI6$fq zdf${#40%7zRiF#&AB(H4a%gtKVWpYpV5idI%(lE4cS+%LtB@L_n8!op3@1LGqsdJO z=VtFAC%Ne^$NblLIu^33`*nohP9eC-XLsQ(s_-Zu>7_cfy(EFCaSX9eo@q1lAyATj z5vR7c9Rtkm%*S#?lo6};eO1g3;5R^C-g+#b|D>GXZgOsA)N^QOxtAeHWNw}L$WuD- zOW()+%Zt^4-r_5NdFJ=pwoFB!ELKL1wkv)5j`p;8>xxv&Tr)+a>U2X>?X|srxD+1s zWFgJwD&Jb8cY;3mOOK|_l#jFdyCi<=t{pnoevPY*TnUxhWKOZ*JH;{0GKiR66?#mqdFpONcVRLeG0?Sv0sb{JJURVB(` z4KC*P<&A`*yib^xVIip5=v@?ULym8HD+ixaSkk1QKfk&t%pXt=x08BeykiS_o=HC@ zR+l+&KIpx0AXaX-D5)?QSD-seO^%%=YzkGj>$n9*<7z(}4gXRxR}e*MRk_Qi6stu9 zz+W#gjTcyL+Mwr=jVMOONTZ!R=fewyzLNg!u8uGCvj<%{T6`@^`~9&Rs=NH!>UoCh z!whBnu?pTE2gQr8vSrd`{eW$+|#+vNT>iq|+-n78Tdtd1AT z|LNQ7()t)-{8-`O0HSA%wrB3*jq;XJ`RAmr~F-*^{zl2`td^_qun%kI}Rw`d<5cl~Ymb%)gx^W}V#*_H=?|w?>2sThpceYs205h9n6R$P5A8u1%Is!eZFLb^#>_G~ zkYjA;b8WjB>0EWLB=a}iw;aX|4G-h}e-ZB8yVte1KIOhP@km$%>E(_uGOh0I?ak6R z_lLfJEH~8@F_8Ai)K^m@yT`X_yEo@uW!&a_A}~QAe>_1k8ln^AZQyR*wx)u6^vVDb zEVhPlW8Dt4`@-@G!HNL}-gQ#67bu~$`K=x^)(44#UtOElH>5-%aTcecmnB!is1X0e z#pco)x9uQSf*J(MARu%v@S}Z$p7C9hTFf1JfTobkFtcqb(+3lj;&IVq-|;JBLTK10 zdZ?7jra~v7HzZ%Yl6upBe{d*t*0z+Y;p1}&t}Qxv8*{m3b%Edjn-2e7D8uQoM-&n^BV#)(fv}}d^WL;^3z(0Re&R05xN1JM`2h(R+wFBZj*-OvN;T_S z#7pM~%a%XXA-4(tnPbW?lg?Itlu%c{y>$K-0?*f-7eX1!9EeU9Q{9x^Wt3)4O~(hU z!lZXIhoPgx9PYlKL>=DuI|P`uIflTubAiRdf2@aBtsK{Dk2c_Khh2B=jB-4ltzO_Y zuF&KbQDhUgTc#uCt*9>cvgYqs2J4zv8=yT^LqryeC5iJZxt$v&!{r08`EN$jZbt?T zbwZ|_GIerE##uSJai}7k8G0=0nqv=25;WmDpI^QlF`n%$%F{PQipoeT`_=DVb(k9# z)mpX}jCMacqW6~6vm1<$gEbP=x#~#|3VokeNj<@mUe43&Rm%-SXo|cForMTgr8wKcu)s*+c2I=DGySImF)oXH~smZyGvg#+=y<5}IIt;fy((f?W^ExJF zRtMtrwRMLBH8I}iCb>DM<3_j&kvgI95Y=wRi2Ri*Z&cvZ*E@As1<$~WhUGxKTSxV> zL*bBf&$Az+Xd5#l^r5VXsgG4`TtJCg=&E6nFy^r^Y_i6!wv`YH6+SL#b4PhDN*Z5(0Z>SNrV7mIjS>v?XST*BVX{hslP~{eCM{yP1D4tGLmQy-x>q1ds0M zS%z?krElUE_5$MIB+hPK2oeV2BbbIR=$_15`Ph>@7z+}MT5L$b+;s#RW5mr(3TvG? z4;C^}=)!>zb47yP!Lt-=*{+Y5o|qft3=M~4l%qsXvXe6?SgErZp5*DQcb0MB4b=x zVC({l-EN-$cq(SKaUQ#Eggee^%5xZnG@wY2UegDBevEy+17B|NL?2IuVz;=Jrq5nPmn~qd(d<>q+{^~3;DC{|*#+P2%aBoY3+|f#jzixE z%mAu9dpRO?((bBbFA9Pud<%{UR7aZTh==w>nv?oTj-qB2lfKd|`wcV#%gqG)-sWO{oQ?Gg@2AEcDWbD5U*43|8 zqt;ShS2yw?NR4h26i)GUwNFR&CfJmOZP~1l4eJT(TVJ(tToEWdLJy)s90pJrzU~s{ z0w@7SAaV8aC-ONyuDy<5KBjqZQm78&CX70KRGJewbyNCeYDM1beDLwME95dcm}93}kvg1C_0AHZs2m4xfUFD@y6-OTZZrL~_nabo? 
z8Pe4hjeS!}u^ay300Qt<0bXq5aPQC<3M1^NzHe5C{JHT4!0tI38rgKi*iryWRbFS) zf0EV2oXorLBkECip$Gk{<2{NBfse~EwWj)dCi&xaC&v3csi&8nkd9C0FAn%UlJ%Pj zJ{XBjCCJttL~v2ZqQr*^&U`)N!mCdn&e%0F>582V6RtwEBTg}N5xrL|9Bqen9#9w+ zpYJp=8!Z4)ODeUIB>X+rb{L6B6nC?ZkJzr>4a~)PC3!H973R#ccifWDfz+9!B^DzB*YV<@CFp=A_ z1B;5r0a;dp@$-W9dmoOi560(mF~_*A_aXZwezQCFBZ2q2}zyGYC^g3?3^Qa`?7%Sey!Q%;AHs|lA zIT>V!juoA3htekMcvZDky3u>{XEPXAi30Nd%g?X6Afi2S)!#A}SR9jS2Mkb+#6*-Ha~ zpKqRMwB(|MBEpO_1lngx}m^MFo zxn=3@!DSlshsVTsi4R-z{x}fiTA~%@>ONX#)oUaWEOUDO;~+Wh_MW>Fuf777OyZVg zvT6PJPMc6qZn3(tJY^67k)@iDIxFnNPgwA%k zEIKT3$A8)DkkfxV3G1`s>-w|hhH1$@2O4YhwUw|%lc^uFI}!Ap>>#3cv23A|o;Toq z*G;nN&My6c9aaAsx$p-&rAFIJHu%MYeHLrm?sqqh-0s9jFj*y9m;1!yt&jHB2h+T! znch)J=t--2bV?;&RN(IlYjDtgV)oW#xZ^@&l+bOl z+tQr&S@PW<`C03y@DA)YH{(Dy!|*<7s9f#o!l!VRKiE{*qdC4A$ODqJO@J3_2L z5yY}L@y``JKT3FL7}WC;YjN)vMAk04P%*Nb^FyaS)%>S$Kvurgzuorv+PY#7C--qP)iW>xnKC*J;*7hdYOB>8(4c)rld_o~Dnue?0-9e@kRn z9znghg&{{_=Ys2dUGXW$%QohpqKYPLNAtEJs}l7WOH`r0<2J%gvDe14{SQ0M%DW>a zXE57^^?7mJuz{gN_wiEV&z&12g0bCJS2Uv;@@Q6t*B51?x{~nY9530Hv6k@BvOL_j zrDnW(ey<}Rf^E^bVx+tlm!MY}QCCB?jfcx1yn4S9DvRuoJ#5^e}+fs}UXk(!%+(flM0*SPuP#E5|2`Ehc?iM3H3xFu;U#Me*5sx@N%X=^ zpgHoh?Qcr%$OjDb%Ljp!nmR-`Df|d0%b?6aZlDfoGqop0DfzlsgY0>bW?*xr7;rBu(bU#N%95^1_d&KO+MVycy}8 zb60A-O^6Gj;goc+9}(pX`~4!dB>}Y_0SB8XnSCYMdR5W-1B%*;BOV5sYjN<0n%Wr; zk;5q*;wI{(q~Nl=NfFXkp{Y^%(qf{hRW=+Sc%rrSgtxwXND-+eriPy`sIgo;1H@jY?)5|`sX_Y4ig!vlg@i06?nwUo>gbQ2dfpj9_zHpV;XxGXh z(w;j-%4$I@=t&Ul>DWW40vB|G))Gy}C;DFZZng7R+N(#cn={$Db%mYENmf}6gK965 zDdq(&+}OqVF2=Ul98M1+FPtM+QW;3OApqn${|j%kMnaMR$Og<)xVE+QXU3dUEji-? zvKp&IkWj$aZ-T*{QMxjai#9o_-)#@^?nZ}0 z^N^j~RWHoze`^64CEfbH=XO0{`e;_qtRL>XeT83d9<**q5|Sua3^9jwIJ0)?Od5;{ z6y8-5F#uaX## zV!Djvg@7cmxU~eCOr_*d@h$2OvCC`(Fnh*Q^-SsvaUqRV#dRLAj(2x+ofy7m$o*#RFxXl8SNdUL-Q(ErbA=~^tcWl|wUi>)OEuBnp( zU@xZ*!ugim=zK)8+JZ{!jx2G%ho+1(J4Pkjviall9MP1v&6yVL=+ZU&)13LTz53%O zzR9+?Rz_TFlrd^-v1)9q~MIu4;aSyiJAi!tt4?} zVmK#-direL3#ywS(I)1L1h8I!O8%_eS&%Zn)u!xt>E7Q8qj$)YdW%~++u=sgtGcHm zOL%+APlIarOCz6J>ofqXWHQ|MVuCCdXc9z%%%)rwFIB6xvUNUDbZ;cLGqqMzl-bd0 zaC^%KVK3eB-l%+GxV*jh$hR&tRi+v$IX;%_@9P0;3SHbL;oAooWe4+*>|`;qZFcxN z@rj$FH`)f051D#0zDE5zESogmg?C62%_PA~ILOEx2{AD% z?^jfX8G6{Lkky3^3Z66G^XuQf?!Si2tBS;|2znqyjT`RB15q0^ds<3o-ceetF3E6g zU1U64|Cz2+FVYevq?Rzq^`U+F>dveJpD7( zq1~F5TAT`v^?1&zW5;sGxxc5mlb5i$R8@2oP#@JY%~m*~l&!78dUa#pZ$ku6^kTAiuZU-zFeS%$K73*B)3i{9z~GJH4N&5 z=_oYZ7pi#r8Q{txn&l12W1XT*kWB@Ir*3!N((H!>GMP>4-u0U?FZdY^Qb!mN&CN&* zL6EQMM3g~laVvFk&l%*&5kr&`kHYwv09w+GG>??N;H0r5=D)+7+HX_sbI^L;XuX~; z`OwWFKWEG>7yR1n_D}td1iAgQ8EjhWVcEye2U=H5fnuIY)IBvq%#;{F_y`0dq-m;z zXEB5POG4M6kh(XLBNac83m3^UlUh%v;Nc}i*;PkHB5Q=Hb*}J79sgp8`mpuGt7wL6 zykh{AmxcMI&Tw(c)SFM+aK zCd}U+y|1C0{rBL>$fWP+luvrk-&%eKk+G2@>T>nLhW1@L?&GrtI0C}wn{DWdgmE)+DAQ|+$!S-u4%zGj z%gnXG4gSj2X^Pkuh3K|?jJ&kWM(cHc9%)U`a!wPgT~AAjC)L=c)btHeaE#AO>bYC; z@joRmalf5FFOOV-u*Z+P0YZ~rytnnY>6HMGUAlLX9}oJO7wV*^&csPE&^ofJ7M{01;s$9_wx zxuOGt8akJq6FA$rb?TD%Vn`EfmsO&`xFu442=Ib(BT$S8GMIzN>Gti;8rc%tEMOW> z>m0aq0L(hAmRfo%nR0whfjAT$4yqvPk^pLJm9qkQ{WzFa!<-?s)UsLA+sxZT=g~it zo+di~bNd$M+i7nk7&~~zM5BS&p5@AOJ=)5U^1^FC)&L%v^!jp>6 z)#b_tKmGlBSS;3xBSz41G*XQ{R*juDA~#ruu}KSvkZy6e{zhB$_Rz$jfQGJ!5{J)Q zNfbzUOsEFuaxPX5*$-)2!R+$mhF8P@sbDDotx*av@#?Qc)x+TowcKU`aaCDh6<|gvC zMJ-l^G(LknI_mrS`sU_a&p(DL&iVml6Fy~mf<;Ig6S|qVS||2n7{-+b9wy(q8$5by z`?F}=Zv|1c%yq4gKhy2~T(S{eTf3%qOZvY_{Zp}^EhraBC=qZo3GB^L3?RifD+D;ImewOfWFO>$dyyS5@%?X&Q_3Wvca#Fm`(` zQa%;$;$#h$d7$x6YTkvHF>1Xu?wT(
zmyLyUSU-JC(2J)ZHP!N@px61Hw5#adN}svF)uQz2ide*1&bQr-JPFUPC5p7$^{<@| zh%iBJ`VPt7{1YEH#SvA!!MVuu9Pe`Y7W-- zfpFl4c}lz!0JE_y#3GG7u#FG2;Ok|Uw1~-pjIz(BP`w)uabG*8QwY-t7IFwb$0=u6 zp}<3^SR+!6y?OE<6M1AoM?*mUR)zp#crWVSQP+0Dn|&Aa#49PG@sf?A8Y~DoMJ=~7 zNyl?IKtd(#@`v*4a=?V8*|5k5Eb?$`iec;;&L|PjD!wE-B*7hWVwt@@7}J>PFA^~V z$GB+ppipC2*5FR|O4ofhY?ivW6;Ql<;dvmd;e%Hin;?v%2Zy|AF@@PA$}SwOEyQ|D zHBQ1j5h;c{D52a-pq8O<1uJ>xQFdDp{Fy94-nGy7@PY|)sWABRPKB$N#by|`Le&(k zwonH|iJ2M{NA&kl6H1(Si1;}TR*8)Zyse{g`X#0Y9uUJJa7c|p=J@Y?9$4v3H^EWc zr+7hc)_WS+o(kSYt^&C;?=9DK@7KAOR_kRI*F$Ubf0147XHCsf8Qj}~lYIM%b7I&M z-lu|F-7i|b)CG*<_wtRMLr8O`B9%qBg5`;Y zGRkaiz8XKe1j^B~2%8{xU##>&&V*@}^2XFv=PXb8fTZD1S4nKi6#u4Wn#zw|Jh>eW zPZD`qt?dn3j4Q@W7S5=zuIh;UbjC`!C~HAhl})F7-dW4n#SQLkGKk|g9ysuZ~ZUCwF}ez0o^Eq^3kZLYeba^3r0;dY<}z>F=**UM~;d*>^h z8IGdmvewN`u7mGZ3ZVT*_cZL{C~KtDJ}1e+_=E6yiE>Ty*)8rl@q@<}^fl&x%ty&> z!Yf}GQQsf)*{y8Nk)NUo;tKjERT_^gFTzl>kMm1nel^SS%4CgtS;y2CSK8loetq5K z((kRyQFE9#cYuf8*5czgV~JMUp}*%gcy0M<&l?X+3(#UnpKC zmo%uxxZ4*-?2g0Lq97Rg{f}ibOr!%8y0VI=+OR9&<;vjl1%9zjZH?^U6f>>9+9Wlg zgRAYq>jA+8AIa==*Jl=B*GzK6$zNRZ>s*UJQ(}9|2e{;eJU8=KL{NkPD zZ+QX;JBSxx*WVC$ysf)=b=*Y*+N`p#vArzF=P)=nh8$bUetU;FU zoE){x8;Lf0?VQ=!Iv0~r76D$hdydLvZahx<$`J*K%Q4!{CK3cb5h4l4fMW6nrDWU9oAscyqroQ@9C*H@iemfV(_xd?_=3$A=1?*n+PhaBIM=Bn z=ILQTk?-F0cD+7ntF;NL13{}dxoNAwX`KX-FMKb1p!*}Uyz%oxidG0)R?Ld~xsroj zZ$K)yVIAO8IHUc#(pzocpwN%u62?4-l5TC2c>p8=qEl5o^VZD{Fy*wWJYgv+Bw_3l z<0s0eB~IYqM4NS?rukw<1eUNNQSwpfXm*@I?=~Uo6}%HuD&#S$MfrjB3A4wO#5;%l zG-8v&)XsLxFxi)l*)gGdS*i#NLvv8$c&40`61yn_o{~?H#Cyt{7w0E^+ zog3M6A2qvGF5`K!AMStM(9R84JV$Z1k9Lf2jE7Dh+RsZTDtmIs7fB*QBzC{wg~cXy z-QW=Q;voo?7fH#dl@9q5ILBVlSCIG4GH|mQxMa?Gr{{0dYozN`R!hQ6_ntyFBv77K z&zzAaMr*E23wkQ??ncJ7=vLq)*i)jgqkg{V?1Re4rgI@NiM(fr#%1_m+1 zq|i6y_=R+n@-Z7^@k7i^r_kUM7*q-MvkPAt_0z6 z!ShwRrt#{AWY1);a4VS5Y;1b%xn@la)D9+6*jE=~U@m0FRZ?b=o)zy);x066L4Ut( zB5&(`NCP(v-QnFG>-!aQ$;$#W|8u4N{vaQ|Z~-)uYVazVu=uAzKy{So_`rk#23NqO z2(ruq9z5-OtQ((gy|cX4^V@_SS(`t;Fl)-#O*ya1D*5f6P{(~Q)B|keSEBtNt-#m# z8l79u>`D&v3(c3oSt~=r-z`4@67~_4-hIjDnB)y!S>d(C8$?<#`BRHf=@m_LP;Yzl z8yeVsbMpp{uLx|F>m*a|r+*d#AzN%0YwvUpG-x6fGPc`OW!ou5AIH1f0I5h=eAp>o zXr!HD7y34sN|8NktLsi9)Qi=r)Ckjs zmJ-QGH$1yPp%I$IZI=!Yls-5)wrDg{x)bDA>CAAy*e#mypaZkn5;yiz0|6>!Y1mW{csgMl znxdhIcx{>xn6u%V8z83O_T@nAYw9F`ps*nJp29vWipFn)|ws*lYi^BYKL4v#AW-mydaiW}2Bb zVn>Bwjle^XzRj=0LzwRtT9;X43g^WRTJv5j7uEo~QOL_DeTk-dEod4=y*jVGM0y;p zFwLIZ@aU3B)2gfX(PEnPF6HRU)@tIo??slSk`NvX>B1mxPKhDJqb4K1&YzJLF0uns zgPh{6;kljT0Z6yr>)lz}HO(TimIL$OTKu!vD&9_gOAUTK_xzViwd1|k7RJQI`Z6}E z=j6A*=-1zY)QWQ+6?~JD4uzePt0o71qP@vKiXXHOPv}P=_l8#$pl8Ux{ zVHPzNj?gyq5+Z|*8(GCX79N#+ zC1WCPE~-9BV4--p{rA=W^M~JX&VPKe4{K9VHO6eEd2@1m`lyK)Z(B`~{{?fW>d67;mJOi9K({MHu+{Xv)Z- z_BZBsmPGx?jw=Uu(S4GWS~wRz+RHy~LL6ydNBTy&Uu%hNhhaV5%48uA2gLrzXrkh1 zCH(z~dpmOaUU{2#B!A3d){CM64;~yt4XOPvW0;uaCT}w#=_W2R+zS1h0-ly^{zAJC zzH>w$P`;HYej=4C$uqrM2xC2Ih%xrHACGF=M`ZWWx8YYF?_1K* zoodujd2Dp84a#PiF{=tDm!JU0`kH|XLM?G~Fir!x$i6XHhfSjR1nqS?grS4*NgnUy zxo}` for installation details. -Additional requirements can be found in `setup.sh `_. - -Tutorials on how to get started with WFST-based NeMo text normalization could be found `tutorials/text_processing `_. - -Rule-based (WFST) TN/ITN: +WFST-based TN/ITN: .. toctree:: :maxdepth: 2 wfst_text_normalization - wfst_inverse_text_normalization wfst_text_processing_deployment - wfst_api diff --git a/docs/source/nlp/text_normalization/wfst/wfst_api.rst b/docs/source/nlp/text_normalization/wfst/wfst_api.rst deleted file mode 100755 index bd6cfd9cedcc..000000000000 --- a/docs/source/nlp/text_normalization/wfst/wfst_api.rst +++ /dev/null @@ -1,37 +0,0 @@ -.. 
_wfst_api: - -NeMo Text Processing API -======================== - -Model Classes -------------- - -.. autoclass:: nemo_text_processing.text_normalization.en.GraphFst - :show-inheritance: - :members: - -.. autoclass:: nemo_text_processing.text_normalization.en.ClassifyFst - :show-inheritance: - :members: - - -.. autoclass:: nemo_text_processing.text_normalization.en.VerbalizeFst - :show-inheritance: - :members: - -.. autoclass:: nemo_text_processing.text_normalization.en.VerbalizeFinalFst - :show-inheritance: - :members: - -.. autoclass:: nemo_text_processing.inverse_text_normalization.en.ClassifyFst - :show-inheritance: - :members: - -.. autoclass:: nemo_text_processing.inverse_text_normalization.en.VerbalizeFst - :show-inheritance: - :members: - -.. autoclass:: nemo_text_processing.inverse_text_normalization.en.VerbalizeFinalFst - :show-inheritance: - :members: - diff --git a/docs/source/nlp/text_normalization/wfst/wfst_inverse_text_normalization.rst b/docs/source/nlp/text_normalization/wfst/wfst_inverse_text_normalization.rst deleted file mode 100644 index 3d03d0230fb3..000000000000 --- a/docs/source/nlp/text_normalization/wfst/wfst_inverse_text_normalization.rst +++ /dev/null @@ -1,110 +0,0 @@ -.. _wfst_itn: - -Inverse Text Normalization -========================== - -Inverse text normalization (ITN) is a part of the Automatic Speech Recognition (ASR) post-processing pipeline. -ITN is the task of converting the raw spoken output of the ASR model into its written form to improve text readability. - -Quick Start Guide ------------------ - -Integrate ITN to a text processing pipeline: -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: python - - # import WFST-based ITN module - from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer - - # initialize inverse normalizer - inverse_normalizer = InverseNormalizer(lang="en", cache_dir="CACHE_DIR") - - # try normalizer on a few examples - print(inverse_normalizer.normalize("it costs one hundred and twenty three dollars")) - # >>>"it costs $123" - - print(inverse_normalizer.normalize("in nineteen seventy")) - # >>> "in 1970" - - -Run prediction: -^^^^^^^^^^^^^^^ - -.. code:: - - # run prediction on - python run_predict.py --input= --output= --lang= \ - [--verbose] - - # single input prediction - python inverse_normalize.py --lang= \ - [--verbose] [--overwrite_cache] [--cache_dir=] - - -The input is expected to be lower-cased. Punctuation are outputted with separating spaces after semiotic tokens, e.g. `"i see, it is ten o'clock..."` -> `"I see, it is 10:00 . . ."`. -Inner-sentence white-space characters in the input are not maintained. -See the above scripts for more details. - - -NeMo ITN :cite:`textprocessing-itn-zhang2021nemo` is based on WFST-grammars (:cite:`textprocessing-itn-mohri2005weighted`, :cite:`textprocessing-itn-mohri2009weighted`). We also provide a deployment route to C++ using `Sparrowhawk `_ :cite:`textprocessing-itn-sparrowhawk` -- an open-source version of Google Kestrel :cite:`textprocessing-itn-ebden2015kestrel`. -See :doc:`Text Procesing Deployment <../tools/text_processing_deployment>` for details. - -.. note:: - - For more details, see the tutorial `NeMo/tutorials/text_processing/Inverse_Text_Normalization.ipynb `__ in `Google's Colab `_. - - -Evaluation ----------- - -Example evaluation run on (cleaned) `Google's text normalization dataset `__ :cite:`textprocessing-itn-sproat2016rnn`: - -.. 
code:: - - python run_evaluate.py --input=./en_with_types/output-00001-of-00100 --lang \ - [--cat CLASS_CATEGORY] [--filter] - -Supported Languages -------------------- - -ITN supports: English, Spanish, German, French, Vietnamese, and Russian languages. - -Classes --------- - -The base class for every grammar is :class:`GraphFst`. -This tool is designed as a two-stage application: 1. `classification` of the input into semiotic tokens and 2. `verbalization` into written form. -For every stage and every semiotic token class there is a corresponding grammar, e.g. :class:`taggers.CardinalFst` -and :class:`verbalizers.CardinalFst`. -Together, they compose the final grammars :class:`ClassifyFst` and -:class:`VerbalizeFinalFst` that are compiled into WFST and used for inference. - - - -.. autoclass:: nemo_text_processing.inverse_text_normalization.en.ClassifyFst - :show-inheritance: - :members: - -.. autoclass:: nemo_text_processing.inverse_text_normalization.en.VerbalizeFinalFst - :show-inheritance: - :members: - - -Installation ------------- - -`nemo_text_processing` is installed with the `nemo_toolkit`. - -See :doc:`NeMo Introduction <../starthere/intro>` for installation details. - -Additional requirements can be found in `setup.sh `_. - - -References ----------- - -.. bibliography:: ../tn_itn_all.bib - :style: plain - :labelprefix: TEXTPROCESSING-ITN - :keyprefix: textprocessing-itn- \ No newline at end of file diff --git a/docs/source/nlp/text_normalization/wfst/wfst_text_normalization.rst b/docs/source/nlp/text_normalization/wfst/wfst_text_normalization.rst index 1c1ad949fe43..da481d2f0b5f 100644 --- a/docs/source/nlp/text_normalization/wfst/wfst_text_normalization.rst +++ b/docs/source/nlp/text_normalization/wfst/wfst_text_normalization.rst @@ -1,184 +1,130 @@ .. _wfst_tn: -Text Normalization -================== +Text (Inverse) Normalization +============================ -NeMo Text Normalization converts text from written form into its verbalized form. It is used as a preprocessing step before Text to Speech (TTS). It could also be used for preprocessing Automatic Speech Recognition (ASR) training transcripts. +The `nemo_text_processing` Python package :cite:`textprocessing-norm-zhang2021nemo` is based on WFST grammars :cite:`textprocessing-norm-mohri2005weighted` and supports: +1. Text Normalization (TN) converts text from written form into its verbalized form. It is used as a preprocessing step before Text to Speech (TTS). For example, -Quick Start Guide ------------------ - -Integrate TN to a text processing pipeline: -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. code-block:: python - - # import WFST-based TN module - from nemo_text_processing.text_normalization.normalize import Normalizer - - # initialize normalizer - normalizer = Normalizer(input_case="cased", lang="en") - - # try normalizer on a few examples - print(normalizer.normalize("123")) - # >>> one hundred twenty three - print(normalizer.normalize_list(["at 10:00", "it weights 10kg."], punct_post_process=True)) - # >>> ["at ten o'clock", 'it weights ten kilograms.'] - - -Run prediction: -^^^^^^^^^^^^^^^ +.. code-block:: bash -.. code:: + "123" -> "one hundred twenty three" - # run prediction on - python run_predict.py --input= --output= --lang= \ - [--input_case=] +2. Inverse text normalization (ITN) is a part of the Automatic Speech Recognition (ASR) post-processing pipeline and can be used to convert normalized ASR model outputs into written form to improve text readability. For example, + +.. 
code-block:: bash
+
+    "one hundred twenty three" -> "123"

-    # single input prediction
-    python normalize.py  --lang= \
-        [--verbose] [--overwrite_cache] [--cache_dir=] [--input_case=]

+3. Audio-based TN provides multiple normalization options. For example,

+.. code-block:: bash
+
+    "123" -> "one hundred twenty three", "one hundred and twenty three", "one two three", "one twenty three" ...

-``INPUT_CASE`` specifies whether to treat the input as lower-cased or case sensitive. By default treat the input as cased since this is more informative, especially for abbreviations. Punctuation are outputted with separating spaces after semiotic tokens, e.g. `"I see, it is 10:00..."` -> `"I see, it is ten o'clock . . ."`.
-Inner-sentence white-space characters in the input are not maintained.
+The normalization which best reflects what is actually said in the audio is then picked.
+Audio-based TN can be used to normalize ASR training data.

+    .. image:: images/task_overview.png
+        :align: center
+        :alt: Text TN and ITN
+        :scale: 50%

-NeMo Text Normalization :cite:`textprocessing-norm-zhang2021nemo` is based on WFST-grammars :cite:`textprocessing-norm-mohri2005weighted` and :cite:`textprocessing-norm-mohri2009weighted`. \
-We also provide a deployment route to C++ using `Sparrowhawk `_ :cite:`textprocessing-norm-sparrowhawk` -- an open-source version of Google Kestrel :cite:`textprocessing-norm-ebden2015kestrel`.
-See :doc:`Text Procesing Deployment ` for details.
-
-
-.. note::
-
-    For more details, see the tutorial `NeMo/tutorials/text_processing/Text_Normalization.ipynb `__ in `Google's Colab `_.
-
-
-Evaluation
-----------
-
-Example evaluation run on `Google's text normalization dataset `__ :cite:`textprocessing-norm-sproat2016rnn`:
-.. code::
-
-    python run_evaluate.py --input=./en_with_types/output-00001-of-00100 --lang=en \
-        [--cat CLASS_CATEGORY] [--input_case INPUT_CASE]
-
-
-Classes
--------
-
-The base class for every grammar is :class:`GraphFst`.
-This tool is designed as a two-stage application: 1. `classification` of the input into semiotic tokens and 2. `verbalization` into written form.
-For every stage and every semiotic token class there is a corresponding grammar, e.g. :class:`taggers.CardinalFst`
-and :class:`verbalizers.CardinalFst`.
-Together, they compose the final grammars :class:`ClassifyFst` and
-:class:`VerbalizeFinalFst` that are compiled into WFST and used for inference.
-
-
-.. autoclass:: nemo_text_processing.text_normalization.en.ClassifyFst
-    :show-inheritance:
-    :members:
-
-.. autoclass:: nemo_text_processing.text_normalization.en.VerbalizeFinalFst
-    :show-inheritance:
-    :members:

+Installation
+------------

-Audio-based Text Normalization
-==============================
+`nemo_text_processing` is automatically installed with `NeMo `_.

 Quick Start Guide
 -----------------

-To normalize text that has corresponding audio recording, it is recommened to use `nemo_text_processing/text_normalization/normalize_with_audio.py `__ script \
-that provides multiple normalization options and chooses the one that minimizes character error rate (CER) of the automatic speech recognition (ASR) output.
-The main difference between the default normalization and the audio-based one, is that most of the semiotic classes use deterministic=False flag.
-
-.. 
code-block:: python

-    # import WFST-based non-deterministic TN module
-    from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
+Text Normalization
+^^^^^^^^^^^^^^^^^^

-    # initialize normalizer
-    normalizer = NormalizerWithAudio(
-            lang="en",
-            input_case="cased",
-            overwrite_cache=False,
-            cache_dir="cache_dir",
-        )
-    # try normalizer on a few examples
-    print(normalizer.normalize("123", n_tagged=10, punct_post_process=True))
-    # >>> {'one hundred twenty three', 'one hundred and twenty three', 'one twenty three', 'one two three'}
+.. code-block:: bash

+    cd NeMo/nemo_text_processing/text_normalization/
+    python normalize.py --text="123" --language=en

-To run this script with a .json manifest file, the manifest file should contain the following fields:

-Parameters to run audio-based normalization (more details could be found in `nemo_text_processing/text_normalization/normalize_with_audio.py `__)
+Inverse Text Normalization
+^^^^^^^^^^^^^^^^^^^^^^^^^^

-.. list-table:: Parameters to run audio-based normalization
-   :widths: 10 10
-   :header-rows: 1
+.. code-block:: bash

-   * - **Parameter**
-     - **Description**
-   * - **audio_data**
-     - path to the audio file
-   * - **text**
-     - raw text
-   * - **pred_text**
-     - ASR model prediction
-   * - **n_tagged**
-     - Number of tagged options to return, -1 - return all possible tagged options
+    cd NeMo/nemo_text_processing/inverse_text_normalization/
+    python inverse_normalize.py --text="one hundred twenty three" --language=en

-See `examples/asr/transcribe_speech.py `__ on how to add ASR predictions.
+Arguments:

-When the manifest is ready, run:
+* ``text`` - Input text.
+* ``input_file`` - Input file with lines of input text. Only one of ``text`` or ``input_file`` is accepted.
+* ``output_file`` - Output file to save normalizations. Needed if ``input_file`` is specified.
+* ``language`` - Language ID.
+* ``input_case`` - ``lower_cased`` or ``cased``. Only for text normalization.
+* ``verbose`` - Outputs intermediate information.
+* ``cache_dir`` - Specifies a cache directory for compiled grammars. If the grammars already exist, this significantly improves speed.
+* ``overwrite_cache`` - Updates grammars in cache.
+* ``whitelist`` - TSV file with custom mappings of written text to spoken form.

-.. code-block:: python

-    python normalize_with_audio.py \
-           --audio_data PATH/TO/MANIFEST.JSON \
-           --language en
+Audio-based TN
+^^^^^^^^^^^^^^^^^^

-To run with a single audio file, specify path to audio and text with:
+.. code-block:: bash

-    .. code-block:: python
+    cd NeMo/nemo_text_processing/text_normalization/
+    python normalize_with_audio.py --text="123" --language="en" --n_tagged=10 --cache_dir="cache_dir" --audio_data="example.wav" --model="stt_en_conformer_ctc_large"

-    python normalize_with_audio.py \
-           --audio_data PATH/TO/AUDIO.WAV \
-           --language en \
-           --text raw text OR PATH/TO/.TXT/FILE
-           --model QuartzNet15x5Base-En \
-           --verbose
+Additional Arguments:

-To see possible normalization options for a text input without an audio file (could be used for debugging), run:
+* ``text`` - Input text or a `JSON manifest file `_ with multiple audio paths.
+* ``audio_data`` - (Optional) Input audio.
+* ``model`` - `Off-the-shelf NeMo CTC ASR model name `_ or path to a local NeMo model checkpoint ending in ``.nemo``.
+* ``n_tagged`` - Number of normalization options to output.

-    .. 
code-block:: python

-    python python normalize_with_audio.py --text "RAW TEXT" --cache_dir ""

-Specify `--cache_dir` to generate .far grammars once and re-used them for faster inference.
-
-See `nemo_text_processing/text_normalization/normalize_with_audio.py `__ for more arguments.
+.. note::

+    More details can be found in `NeMo/tutorials/text_processing/Text_(Inverse)_Normalization.ipynb `__ in `Google's Colab `_.
+
+Language Support Matrix
+------------------------
+
++------------------+----------+----------+----------+--------------------+
+| **Language**     | **ID**   | **TN**   | **ITN**  | **Audio-based TN** |
++------------------+----------+----------+----------+--------------------+
+| English          | en       | x        | x        | x                  |
++------------------+----------+----------+----------+--------------------+
+| Spanish          | es       | x        | x        | x                  |
++------------------+----------+----------+----------+--------------------+
+| German           | de       | x        | x        | x                  |
++------------------+----------+----------+----------+--------------------+
+| French           | fr       |          | x        |                    |
++------------------+----------+----------+----------+--------------------+
+| Russian          | ru       |          | x        | x                  |
++------------------+----------+----------+----------+--------------------+
+| Vietnamese       | vi       |          | x        |                    |
++------------------+----------+----------+----------+--------------------+
+
+Grammar customization
+---------------------

-Supported Languages
--------------------
+.. note::

-Deterministic TN supports: English, German and Spanish languages.
-Non-deterministic (audio-based) TN supports: English, German, Spanish, and Russian languages.
+    An in-depth walkthrough is available in `NeMo/tutorials/text_processing/WFST_tutorial.ipynb `__ in `Google's Colab `_.

-Installation
-------------
+Deploy to C++
+-----------------

-`nemo_text_processing` is installed with the `nemo_toolkit`.
+See :doc:`Text Processing Deployment ` for details.

-See :doc:`NeMo Introduction <../starthere/intro>` for installation details.

-Additional requirements can be found in `setup.sh `_.

 References
 ----------
diff --git a/docs/source/nlp/text_normalization/wfst/wfst_text_processing_deployment.rst b/docs/source/nlp/text_normalization/wfst/wfst_text_processing_deployment.rst
index 9927ca2cd32a..d40926dad5d4 100644
--- a/docs/source/nlp/text_normalization/wfst/wfst_text_processing_deployment.rst
+++ b/docs/source/nlp/text_normalization/wfst/wfst_text_processing_deployment.rst
@@ -1,81 +1,93 @@
 .. _wfst_deployment:

-NeMo Text Processing Deployment
-===============================
+Deploy to Production with C++ backend
+=====================================
+
+NeMo provides tools to deploy :doc:`TN and ITN ` for production :cite:`textprocessing-deployment-zhang2021nemo`.
+It uses `Sparrowhawk `_ :cite:`textprocessing-deployment-sparrowhawk` -- an open-source C++ framework by Google.
+The grammars written with NeMo can be exported into an `OpenFST `_ Archive File (FAR) and dropped into Sparrowhawk.
+
+    .. image:: images/deployment_pipeline.png
+        :align: center
+        :alt: Deployment pipeline
+        :scale: 50%

-NeMo provides a tool for deployment of :doc:`NeMo Inverse Text Normalization (ITN) ` and :doc:`NeMo Text Normalization (TN) ` for production :cite:`textprocessing-deployment-zhang2021nemo`.
-It uses `Sparrowhawk `_ :cite:`textprocessing-deployment-sparrowhawk` -- an open-source version of Google Kestrel :cite:`textprocessing-deployment-ebden2015kestrel`.
-The scripts for deployment could be found at `NeMo/tools/text_processing_deployment `_.
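
To make the FAR hand-off concrete, the export step can be sketched in a few lines of Python. This is an illustration only: it assumes pynini's ``export.Exporter`` helper and uses a toy one-rule grammar with a placeholder archive key (``pynini_export.py`` described below is the supported route):

.. code-block:: python

    import pynini
    from pynini.export import export  # assumed helper module

    # Toy stand-in for a real classifier grammar: rewrites "123" into spoken form.
    fst = pynini.cross("123", "one hundred twenty three").optimize()

    # Write the FST into an OpenFST archive (FAR) under a named key;
    # Sparrowhawk loads such archives at runtime. The key name is illustrative.
    exporter = export.Exporter("tokenize_and_classify.far")
    exporter["TOKENIZE_AND_CLASSIFY"] = fst
    exporter.close()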
Requirements
------------

-:doc:`nemo_text_processing ` package
+* :doc:`nemo_text_processing ` package
+* `Docker `_
+* `NeMo source code `_

-Usage
------
+.. _wfst_deployment_quick_start:

+Quick Start
+-----------

-Starts docker container with production backend with plugged in grammars. This is entry point script.
+Examples of how to run:

-Arguments:
-^^^^^^^^^
-* ``GRAMMARS`` - ``tn_grammars`` or ``itn_grammars`` to export either TN or ITN grammars from :doc:`WFST ITN ` or :doc:`WFST TN `.
-* ``LANGUAGE`` - `en` for English
-* ``INPUT_CASE`` - ``cased`` or ``lower_cased`` (lower_cased is supported only in TN grammars).
-* ``MODE`` - choose ``test`` to run test on the grammars inside the container.
+.. code-block:: bash

-For example:
+    # export English TN grammars and return prompt inside docker container
+    cd NeMo/tools/text_processing_deployment
+    bash export_grammars.sh --GRAMMARS=tn_grammars --LANGUAGE=en --INPUT_CASE=cased

+    # export English ITN grammars and return prompt inside docker container
+    cd NeMo/tools/text_processing_deployment
+    bash export_grammars.sh --GRAMMARS=itn_grammars --LANGUAGE=en


-.. code-block:: bash

-    # to export ITN grammars
-    cd NeMo/tools/text_processing_deployment
-    bash export_grammar.sh --GRAMMARS=itn_grammars --LANGUAGE=en
+Arguments:
+^^^^^^^^^^
+* ``GRAMMARS`` - ``tn_grammars`` or ``itn_grammars`` to export either TN or ITN grammars.
+* ``LANGUAGE`` - `en` for English. See :doc:`here ` for the full list of languages.
+* ``INPUT_CASE`` - ``cased`` or ``lower_cased`` (only used for TN; ITN does not differentiate between the two).
+* ``MODE`` - By default ``export``, which returns a prompt inside the Docker container. ``--MODE=test`` runs the NeMo pytests inside the container instead.
+* ``OVERWRITE_CACHE`` - Whether to re-export grammars or load them from cache. By default ``True``.
+* ``FORCE_REBUILD`` - Whether to rebuild the Docker image in case of updated dependencies. By default ``False``.

-    # to export and test TN grammars
-    bash export_grammar.sh --GRAMMARS=itn_grammars --INPUT_CASE=cased --MODE=test --LANGUAGE=en
+Detailed pipeline
+-----------------

-This script runs the following steps in sequence:
+`export_grammars.sh` runs the following steps in sequence:

-Exports grammar `ClassifyFst` and `VerbalizeFst` from :doc:`nemo_text_processing ` to `OUTPUT_DIR/classify/tokenize_and_classify.far` and `OUTPUT_DIR/verbalize/verbalize.far` respectively.
+Go to the script folder:

 .. code-block:: bash

     cd NeMo/tools/text_processing_deployment
-    python pynini_export.py <--output_dir OUTPUT_DIR> <--grammars GRAMMARS> <--input_case INPUT_CASE> <--language LANGUAGE>

-Builds C++ production backend docker
+1. Grammars written in Python are exported to `OpenFST `_ archive files (FAR). Specifically, the grammars `ClassifyFst` and `VerbalizeFst` from :doc:`nemo_text_processing ` are exported and saved to `./LANGUAGE/classify/tokenize_and_classify.far` and `./LANGUAGE/verbalize/verbalize.far` respectively.

 .. code-block:: bash

-    cd NeMo/tools/text_processing_deployment
-    bash docker/build.sh
+    python pynini_export.py <--output_dir .> <--grammars GRAMMARS> <--input_case INPUT_CASE> <--language LANGUAGE>

+.. warning::

-Plugs in grammars into production backend by mounting grammar directory `classify/` and `verbalize/` with sparrowhawk grammar directory inside docker. Returns docker prompt
+    TN and ITN grammars are saved to the same file by default.
+
+2. A Docker image is built with all dependencies, including `Thrax `_ and `Sparrowhawk `_.

-.. 
code-block:: bash

-    cd NeMo/tools/text_processing_deployment
-    # to launch container with the exported grammars
-    bash docker/launch.sh
+    bash docker/build.sh

-    # to launch container with the exported grammars and run tests on TN grammars
-    bash docker/launch.sh test_tn_grammars
+3. Grammars are plugged into the production backend by mounting the grammar directories `LANGUAGE/classify/` and `LANGUAGE/verbalize/` inside the Docker container. Returns a Docker prompt.

-    # to launch container with the exported grammars and run tests on ITN grammars
-    bash docker/launch.sh test_itn_grammars
+.. code-block:: bash

+    # launch container with the exported grammars
+    bash docker/launch.sh

-Runs TN or ITN in docker container:
+4. Runs the system inside the Docker container.

 .. code-block:: bash

-    echo "two dollars fifty" | ../../src/bin/normalizer_main --config=sparrowhawk_configuration.ascii_proto
+    echo "ITN result: two dollars fifty. TN result: $2.50" | ../../src/bin/normalizer_main --config=sparrowhawk_configuration.ascii_proto

-This returns $2.50 for ITN.
+This returns "ITN result: $2.50. TN result: two dollars fifty cents".

 References
 ----------
diff --git a/docs/source/starthere/tutorials.rst b/docs/source/starthere/tutorials.rst
index a526b28130e5..280c0555bc40 100644
--- a/docs/source/starthere/tutorials.rst
+++ b/docs/source/starthere/tutorials.rst
@@ -146,14 +146,11 @@ To run a tutorial:
      - CTC Segmentation
      - `CTC Segmentation `_
    * - Text Processing
-     - Text Normalization for TTS
-     - `Text Normalization `_
-   * - Text Processing
-     - Inverse Text Normalization for ASR
-     - `Inverse Text Normalization `_
+     - Text Normalization and Inverse Normalization for ASR and TTS
+     - `Text Normalization `_
    * - Text Processing
      - Inverse Text Normalization for ASR - Thutmose Tagger
      - `Inverse Text Normalization with Thutmose Tagger `_
    * - Text Processing
      - Constructing Normalization Grammars with WFSTs
-     - `WFST Tutorial `_
+     - `WFST Tutorial `_
diff --git a/nemo_text_processing/README.md b/nemo_text_processing/README.md
index 46cce95b281a..181d6ddc8e3b 100644
--- a/nemo_text_processing/README.md
+++ b/nemo_text_processing/README.md
@@ -5,4 +5,4 @@ Introduction
 ------------
 NeMo's `nemo_text_processing` is a Python package that is installed with the `nemo_toolkit`.
-See [documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/text_normalization/intro.html) for details.
\ No newline at end of file
+See [documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/nlp/text_normalization/wfst/wfst_text_normalization.html) for details.
\ No newline at end of file
diff --git a/nemo_text_processing/inverse_text_normalization/README.md b/nemo_text_processing/inverse_text_normalization/README.md
index d3525922c2f6..c0dcb24481b8 100644
--- a/nemo_text_processing/inverse_text_normalization/README.md
+++ b/nemo_text_processing/inverse_text_normalization/README.md
@@ -1,61 +1,10 @@
 # Inverse Text Normalization

-Inverse text normalization (ITN) is a part of the Automatic Speech Recognition (ASR) post-processing pipeline.
-ITN is the task of converting the raw spoken output of the ASR model into its written form to improve text readability.
+Inverse Text Normalization is part of NeMo's `nemo_text_processing` - a Python package that is installed with the `nemo_toolkit`.
+It converts text from spoken form into written form, e.g. "one hundred twenty three" -> "123".
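
A minimal Python sketch of the same conversion (it uses the `InverseNormalizer` class and `inverse_normalize()` method exercised by `inverse_normalize.py` elsewhere in this patch; the output shown is illustrative):

```python
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer

# Building the WFST grammars takes a while; a cache_dir argument can be
# passed to reuse compiled grammars on later runs.
inverse_normalizer = InverseNormalizer(lang="en")

print(inverse_normalizer.inverse_normalize("one hundred twenty three", verbose=False))
# expected: 123
```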
-For example, `one hundred twenty three kilograms` -> `123 kg` +See [NeMo documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/nlp/text_normalization/wfst/wfst_text_normalization.html) for details. -# Documentation +Tutorial with overview of the package capabilities: [Text_(Inverse)_Normalization.ipynb](https://colab.research.google.com/github/NVIDIA/NeMo/blob/stable/tutorials/text_processing/Text_(Inverse)_Normalization.ipynb) -[ITN documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/nlp/text_normalization/wfst/wfst_inverse_text_normalization.html). - -[TN/INT tutorials NeMo/tutorials/text_processing](https://github.com/NVIDIA/NeMo/tree/stable/tutorials/text_processing). - -# Installation - -``` bash setup.sh ``` - -# Integrate ITN to a text processing pipeline - -``` -# import WFST-based ITN module -from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer - -# initialize inverse normalizer -inverse_normalizer = InverseNormalizer(lang="en", cache_dir="CACHE_DIR") - -# try normalizer on a few examples -print(inverse_normalizer.normalize("it costs one hundred and twenty three dollars")) -# >>>"it costs $123" - -print(inverse_normalizer.normalize("in nineteen seventy")) -# >>> "in 1970" -``` - -# Prediction - -``` -# run prediction on -python run_predict.py --input= --output= --lang= \ - [--verbose] - -# single input prediction -python inverse_normalize.py --lang= \ - [--verbose] [--overwrite_cache] [--cache_dir=] -``` - -The input is expected to be lower-cased. Punctuation are outputted with separating spaces after semiotic tokens, e.g. `"i see, it is ten o'clock..."` -> `"I see, it is 10:00 . . ."`. -Inner-sentence white-space characters in the input are not maintained. -See the above scripts for more details. - -# Supported Languages - -ITN supports: English, Spanish, German, French, Vietnamese, and Russian languages. - -# Evaluation -Evaluation on text normalization data, e.g. [Google text normalization dataset](https://www.kaggle.com/richardwilliamsproat/text-normalization-for-english-russian-and-polish). 
-
-```
-python run_evaluate.py --input=./en_with_types/output-00001-of-00100 \
-    [--cat CATEGORY] [--filter]
-```
\ No newline at end of file
+Tutorial on how to customize the underlying grammars: [WFST_Tutorial.ipynb](https://colab.research.google.com/github/NVIDIA/NeMo/blob/stable/tutorials/text_processing/WFST_Tutorial.ipynb)
\ No newline at end of file
diff --git a/nemo_text_processing/inverse_text_normalization/inverse_normalize.py b/nemo_text_processing/inverse_text_normalization/inverse_normalize.py
index fa0a3e29d35f..6b2c84e571b4 100644
--- a/nemo_text_processing/inverse_text_normalization/inverse_normalize.py
+++ b/nemo_text_processing/inverse_text_normalization/inverse_normalize.py
@@ -16,7 +16,12 @@
 from time import perf_counter
 from typing import List

-from nemo_text_processing.text_normalization.data_loader_utils import check_installation, get_installation_msg
+from nemo_text_processing.text_normalization.data_loader_utils import (
+    check_installation,
+    get_installation_msg,
+    load_file,
+    write_file,
+)
 from nemo_text_processing.text_normalization.normalize import Normalizer
 from nemo_text_processing.text_normalization.token_parser import TokenParser

@@ -102,7 +107,10 @@ def inverse_normalize(self, text: str, verbose: bool) -> str:

 def parse_args():
     parser = ArgumentParser()
-    parser.add_argument("input_string", help="input string", type=str)
+    input = parser.add_mutually_exclusive_group()
+    input.add_argument("--text", dest="input_string", help="input string", type=str)
+    input.add_argument("--input_file", dest="input_file", help="input file path", type=str)
+    parser.add_argument('--output_file', dest="output_file", help="output file path", type=str)
     parser.add_argument(
         "--language", help="language", choices=['en', 'de', 'es', 'ru', 'fr', 'vi'], default="en", type=str
     )
@@ -124,6 +132,17 @@ def parse_args():
         lang=args.language, cache_dir=args.cache_dir, overwrite_cache=args.overwrite_cache
     )
     print(f'Time to generate graph: {round(perf_counter() - start_time, 2)} sec')
-    start_time = perf_counter()
-    print(inverse_normalizer.inverse_normalize(args.input_string, verbose=args.verbose))
-    print(f'Execution time: {round(perf_counter() - start_time, 2)} sec')
+
+    if args.input_string:
+        print(inverse_normalizer.inverse_normalize(args.input_string, verbose=args.verbose))
+    elif args.input_file:
+        print("Loading data: " + args.input_file)
+        data = load_file(args.input_file)
+
+        print("- Data: " + str(len(data)) + " sentences")
+        prediction = inverse_normalizer.inverse_normalize_list(data, verbose=args.verbose)
+        if args.output_file:
+            write_file(args.output_file, prediction)
+            print(f"- Denormalized. Writing out to {args.output_file}")
+        else:
+            print(prediction)
diff --git a/nemo_text_processing/inverse_text_normalization/run_predict.py b/nemo_text_processing/inverse_text_normalization/run_predict.py
deleted file mode 100644
index 811f8da11523..000000000000
--- a/nemo_text_processing/inverse_text_normalization/run_predict.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and -# limitations under the License. - -from argparse import ArgumentParser -from typing import List - -from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer - - -''' -Runs inverse text normalization on text data -''' - - -def load_file(file_path: str) -> List[str]: - """ - Load given text file into list of string. - - Args: - file_path: file path - - Returns: flat list of string - """ - res = [] - with open(file_path, 'r', encoding='utf-8') as fp: - for line in fp: - res.append(line) - return res - - -def write_file(file_path: str, data: List[str]): - """ - Writes out list of string to file. - - Args: - file_path: file path - data: list of string - - """ - with open(file_path, 'w', encoding='utf-8') as fp: - for line in data: - fp.write(line + '\n') - - -def parse_args(): - parser = ArgumentParser() - parser.add_argument("--input", help="input file path", required=True, type=str) - parser.add_argument( - "--language", help="language", choices=['en', 'de', 'es', 'ru', 'fr', 'vi'], default="en", type=str - ) - parser.add_argument("--output", help="output file path", required=True, type=str) - parser.add_argument("--verbose", help="print denormalization info. For debugging", action='store_true') - return parser.parse_args() - - -if __name__ == "__main__": - args = parse_args() - file_path = args.input - inverse_normalizer = InverseNormalizer(lang=args.language) - - print("Loading data: " + file_path) - data = load_file(file_path) - - print("- Data: " + str(len(data)) + " sentences") - inverse_normalizer_prediction = inverse_normalizer.inverse_normalize_list(data, verbose=args.verbose) - write_file(args.output, inverse_normalizer_prediction) - print(f"- Denormalized. Writing out to {args.output}") diff --git a/nemo_text_processing/text_normalization/README.md b/nemo_text_processing/text_normalization/README.md index 83018cac03c8..515572f27b76 100644 --- a/nemo_text_processing/text_normalization/README.md +++ b/nemo_text_processing/text_normalization/README.md @@ -1,53 +1,10 @@ # Text Normalization -NeMo Text Normalization converts text from written form into its verbalized form. It is used as a preprocessing step before Text to Speech (TTS). It could also be used for preprocessing Automatic Speech Recognition (ASR) training transcripts. +Text Normalization is part of NeMo's `nemo_text_processing` - a Python package that is installed with the `nemo_toolkit`. +It converts text from written form into its verbalized form, e.g. "123" -> "one hundred twenty three". -For example, `123 kg` -> `one hundred twenty three kilograms` +See [NeMo documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/nlp/text_normalization/wfst/wfst_text_normalization.html) for details. -# Documentation +Tutorial with overview of the package capabilities: [Text_(Inverse)_Normalization.ipynb](https://colab.research.google.com/github/NVIDIA/NeMo/blob/stable/tutorials/text_processing/Text_(Inverse)_Normalization.ipynb) -[TN documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/nlp/text_normalization/wfst/wfst_text_normalization.html). - -[TN/INT tutorials: NeMo/tutorials/text_processing](https://github.com/NVIDIA/NeMo/tree/stable/tutorials/text_processing). 
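
For reference, the written-to-spoken conversion the new README text above describes boils down to a couple of lines of Python. This sketch mirrors the quick-start example removed just below (output shown is illustrative):

```python
from nemo_text_processing.text_normalization.normalize import Normalizer

# input_case="cased" keeps capitalization information, which helps with abbreviations.
normalizer = Normalizer(input_case="cased", lang="en")

print(normalizer.normalize("123"))
# expected: one hundred twenty three
```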
-
-# Installation
-`bash ../setup.sh`
-
-# Integrate TN to a text processing pipeline
-
-```
-# import WFST-based TN module
-from nemo_text_processing.text_normalization.normalize import Normalizer
-
-# initialize normalizer
-normalizer = Normalizer(input_case="cased", lang="en")
-
-# try normalizer on a few examples
-print(normalizer.normalize("123"))
-# >>> one hundred twenty three
-print(normalizer.normalize_list(["at 10:00", "it weights 10kg."], punct_post_process=True))
-# >>> ["at ten o'clock", 'it weights ten kilograms.']
-```
-
-# Prediction
-
-```
-# run prediction on 
-python run_predict.py  --input= --output= --lang= \
-    [--input_case=]
-
-# single input prediction
-python normalize.py  --lang= \
-    [--verbose] [--overwrite_cache] [--cache_dir=] [--input_case=]
-```
-
-# Evaluation
-
-Evaluation on text normalization data, e.g. [Google text normalization dataset](https://www.kaggle.com/richardwilliamsproat/text-normalization-for-english-russian-and-polish).
-
-
-``` python run_evaluate.py  --input=./en_with_types/output-00001-of-00100 [--cat CATEGORY] ```
-
-# Audio-based normalization
-
-See [documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/nlp/text_normalization/wfst/wfst_text_normalization.html#audio-based-text-normalization) for more details.
\ No newline at end of file
+Tutorial on how to customize the underlying grammars: [WFST_Tutorial.ipynb](https://colab.research.google.com/github/NVIDIA/NeMo/blob/stable/tutorials/text_processing/WFST_Tutorial.ipynb)
\ No newline at end of file
diff --git a/nemo_text_processing/text_normalization/data_loader_utils.py b/nemo_text_processing/text_normalization/data_loader_utils.py
index 887ca75cc5eb..4a83e1f49579 100644
--- a/nemo_text_processing/text_normalization/data_loader_utils.py
+++ b/nemo_text_processing/text_normalization/data_loader_utils.py
@@ -41,7 +41,7 @@
 ]


-def load_kaggle_text_norm_file(file_path: str) -> List[Instance]:
+def _load_kaggle_text_norm_file(file_path: str) -> List[Instance]:
     """
     https://www.kaggle.com/richardwilliamsproat/text-normalization-for-english-russian-and-polish
     Loads text file in the Kaggle Google text normalization file format: \t\t<`self` if trivial class or normalized text>
@@ -81,7 +81,7 @@ def load_kaggle_text_norm_file(file_path: str) -> List[Instance]:
     return res


-def load_files(file_paths: List[str], load_func=load_kaggle_text_norm_file) -> List[Instance]:
+def load_files(file_paths: List[str], load_func=_load_kaggle_text_norm_file) -> List[Instance]:
     """
     Load given list of text files using the `load_func` function.

@@ -254,3 +254,33 @@ def check_installation():
 def get_installation_msg():
     msg = "`pynini` is not installed ! \n Please run the `nemo_text_processing/setup.sh` script prior to usage of this toolkit."
     return msg
+
+
+def load_file(file_path: str) -> List[str]:
+    """
+    Loads given text file with separate lines into list of string.
+
+    Args:
+        file_path: file path
+
+    Returns: flat list of string
+    """
+    res = []
+    with open(file_path, 'r') as fp:
+        for line in fp:
+            res.append(line)
+    return res
+
+
+def write_file(file_path: str, data: List[str]):
+    """
+    Writes out list of string to file. 
+ + Args: + file_path: file path + data: list of string + + """ + with open(file_path, 'w') as fp: + for line in data: + fp.write(line + '\n') diff --git a/nemo_text_processing/text_normalization/en/taggers/range.py b/nemo_text_processing/text_normalization/en/taggers/range.py index 721efd5bc571..42ccd5faf4d0 100644 --- a/nemo_text_processing/text_normalization/en/taggers/range.py +++ b/nemo_text_processing/text_normalization/en/taggers/range.py @@ -13,12 +13,7 @@ # limitations under the License. -from nemo_text_processing.text_normalization.en.graph_utils import ( - NEMO_DIGIT, - GraphFst, - convert_space, - delete_extra_space, -) +from nemo_text_processing.text_normalization.en.graph_utils import NEMO_DIGIT, GraphFst, convert_space try: import pynini @@ -50,7 +45,7 @@ def __init__( delete_space = pynini.closure(pynutil.delete(" "), 0, 1) cardinal = cardinal.graph_with_and - approx = pynini.cross("~", "approximately") + delete_extra_space + approx = pynini.cross("~", "approximately") # TIME time_graph = time + delete_space + pynini.cross("-", " to ") + delete_space + time diff --git a/nemo_text_processing/text_normalization/normalize.py b/nemo_text_processing/text_normalization/normalize.py index a8b583a6e06c..84bb00af1f0c 100644 --- a/nemo_text_processing/text_normalization/normalize.py +++ b/nemo_text_processing/text_normalization/normalize.py @@ -20,7 +20,12 @@ from math import factorial from typing import Dict, List, Union -from nemo_text_processing.text_normalization.data_loader_utils import get_installation_msg, pre_process +from nemo_text_processing.text_normalization.data_loader_utils import ( + get_installation_msg, + load_file, + pre_process, + write_file, +) from nemo_text_processing.text_normalization.token_parser import PRESERVE_ORDER_KEY, TokenParser from tqdm import tqdm @@ -111,7 +116,9 @@ def __init__( self.processor = None print("NeMo NLP is not available. 
Moses de-tokenization will be skipped.") - def normalize_list(self, texts: List[str], verbose=False, punct_post_process: bool = False) -> List[str]: + def normalize_list( + self, texts: List[str], verbose=False, punct_pre_process: bool = False, punct_post_process: bool = False + ) -> List[str]: """ NeMo text normalizer @@ -124,7 +131,9 @@ def normalize_list(self, texts: List[str], verbose=False, punct_post_process: bo res = [] for input in tqdm(texts): try: - text = self.normalize(input, verbose=verbose, punct_post_process=punct_post_process) + text = self.normalize( + input, verbose=verbose, punct_pre_process=punct_pre_process, punct_post_process=punct_post_process + ) except: print(input) raise Exception @@ -367,7 +376,10 @@ def select_verbalizer(self, lattice: 'pynini.FstLike') -> str: def parse_args(): parser = ArgumentParser() - parser.add_argument("input_string", help="input string", type=str) + input = parser.add_mutually_exclusive_group() + input.add_argument("--text", dest="input_string", help="input string", type=str) + input.add_argument("--input_file", dest="input_file", help="input file path", type=str) + parser.add_argument('--output_file', dest="output_file", help="output file path", type=str) parser.add_argument("--language", help="language", choices=["en", "de", "es"], default="en", type=str) parser.add_argument( "--input_case", help="input capitalization", choices=["lower_cased", "cased"], default="cased", type=str @@ -393,6 +405,7 @@ def parse_args(): if __name__ == "__main__": args = parse_args() whitelist = os.path.abspath(args.whitelist) if args.whitelist else None + normalizer = Normalizer( input_case=args.input_case, cache_dir=args.cache_dir, @@ -400,11 +413,28 @@ def parse_args(): whitelist=whitelist, lang=args.language, ) - print( - normalizer.normalize( - args.input_string, + if args.input_string: + print( + normalizer.normalize( + args.input_string, + verbose=args.verbose, + punct_pre_process=args.punct_pre_process, + punct_post_process=args.punct_post_process, + ) + ) + elif args.input_file: + print("Loading data: " + args.input_file) + data = load_file(args.input_file) + + print("- Data: " + str(len(data)) + " sentences") + normalizer_prediction = normalizer.normalize_list( + data, verbose=args.verbose, punct_pre_process=args.punct_pre_process, punct_post_process=args.punct_post_process, ) - ) + if args.output_file: + write_file(args.output_file, normalizer_prediction) + print(f"- Normalized. Writing out to {args.output_file}") + else: + print(normalizer_prediction) diff --git a/nemo_text_processing/text_normalization/run_predict.py b/nemo_text_processing/text_normalization/run_predict.py deleted file mode 100644 index 2d99e7a088d1..000000000000 --- a/nemo_text_processing/text_normalization/run_predict.py +++ /dev/null @@ -1,79 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from argparse import ArgumentParser -from typing import List - -from nemo_text_processing.text_normalization.normalize import Normalizer - - -''' -Runs normalization prediction on text data -''' - - -def load_file(file_path: str) -> List[str]: - """ - Load given text file into list of string. - - Args: - file_path: file path - - Returns: flat list of string - """ - res = [] - with open(file_path, 'r') as fp: - for line in fp: - res.append(line) - return res - - -def write_file(file_path: str, data: List[str]): - """ - Writes out list of string to file. - - Args: - file_path: file path - data: list of string - - """ - with open(file_path, 'w') as fp: - for line in data: - fp.write(line + '\n') - - -def parse_args(): - parser = ArgumentParser() - parser.add_argument("--input", help="input file path", required=True, type=str) - parser.add_argument("--language", help="language", choices=['en'], default="en", type=str) - parser.add_argument("--output", help="output file path", required=True, type=str) - parser.add_argument( - "--input_case", help="input capitalization", choices=["lower_cased", "cased"], default="cased", type=str - ) - parser.add_argument("--verbose", help="print meta info for debugging", action='store_true') - return parser.parse_args() - - -if __name__ == "__main__": - args = parse_args() - file_path = args.input - normalizer = Normalizer(input_case=args.input_case, lang=args.language) - - print("Loading data: " + file_path) - data = load_file(file_path) - - print("- Data: " + str(len(data)) + " sentences") - normalizer_prediction = normalizer.normalize_list(data, verbose=args.verbose) - write_file(args.output, normalizer_prediction) - print(f"- Normalized. Writing out to {args.output}") diff --git a/requirements/requirements_text_processing.txt b/requirements/requirements_nemo_text_processing.txt similarity index 100% rename from requirements/requirements_text_processing.txt rename to requirements/requirements_nemo_text_processing.txt diff --git a/setup.py b/setup.py index 5029b788f327..5514c7057250 100644 --- a/setup.py +++ b/setup.py @@ -80,7 +80,7 @@ def req_file(filename, folder="requirements"): # User packages 'test': req_file("requirements_test.txt"), # NeMo Tools - 'text_processing': req_file("requirements_text_processing.txt"), + 'nemo_text_processing': req_file("requirements_nemo_text_processing.txt"), # Torch Packages # 'torch_tts': req_file("requirements_torch_tts.txt"), ## Removed in 1.7.0 # Lightning Collections Packages @@ -96,7 +96,7 @@ def req_file(filename, folder="requirements"): extras_require['all'] = list(chain(extras_require.values())) # Add lightning requirements as needed -extras_require['common'] = list(chain([extras_require['common'], extras_require['text_processing']])) +extras_require['common'] = list(chain([extras_require['common'], extras_require['nemo_text_processing']])) extras_require['test'] = list(chain([extras_require['tts'], extras_require['core'], extras_require['common']])) extras_require['asr'] = list(chain([extras_require['asr'], extras_require['core'], extras_require['common']])) extras_require['cv'] = list(chain([extras_require['cv'], extras_require['core'], extras_require['common']])) diff --git a/tests/nemo_text_processing/en/data_text_normalization/test_cases_address.txt b/tests/nemo_text_processing/en/data_text_normalization/test_cases_address.txt index dad2d181b1cd..20729bc3b9c7 100644 --- a/tests/nemo_text_processing/en/data_text_normalization/test_cases_address.txt +++ 
b/tests/nemo_text_processing/en/data_text_normalization/test_cases_address.txt @@ -5,4 +5,5 @@ 1211 E Arques Ave~twelve eleven East Arques Avenue 708 N 1st St, San City~seven zero eight North first Street, San City 12 S 1st st~twelve South first Street -1990 for the Ata ST~nineteen ninety for the Ata ST \ No newline at end of file +1990 for the Ata ST~nineteen ninety for the Ata ST +Main St.~Main St . \ No newline at end of file diff --git a/tests/nemo_text_processing/en/data_text_normalization/test_cases_serial.txt b/tests/nemo_text_processing/en/data_text_normalization/test_cases_serial.txt index e3d2f481312d..8e79047406a3 100644 --- a/tests/nemo_text_processing/en/data_text_normalization/test_cases_serial.txt +++ b/tests/nemo_text_processing/en/data_text_normalization/test_cases_serial.txt @@ -29,4 +29,5 @@ covid-19~covid-nineteen a 4-kilogram bag~a four-kilogram bag 100-car~one hundred-car 123/261788/2021~one hundred twenty three/two six one seven eight eight/two thousand twenty one -2*8~two asterisk eight \ No newline at end of file +2*8~two asterisk eight +and/or~and slash or \ No newline at end of file diff --git a/tests/nemo_text_processing/en/data_text_normalization/test_cases_time.txt b/tests/nemo_text_processing/en/data_text_normalization/test_cases_time.txt index a340ebcc11ea..c337c6ddf086 100644 --- a/tests/nemo_text_processing/en/data_text_normalization/test_cases_time.txt +++ b/tests/nemo_text_processing/en/data_text_normalization/test_cases_time.txt @@ -18,3 +18,4 @@ 11:20AM,2:40PM and 10:10PM.~eleven twenty AM , two forty PM and ten ten PM . 2pm-5pm~two PM to five PM 5pm~five PM +11:30...~eleven thirty . . . diff --git a/tests/nemo_text_processing/en/data_text_normalization/test_cases_whitelist.txt b/tests/nemo_text_processing/en/data_text_normalization/test_cases_whitelist.txt index 1e3aeab78e1f..37d8d54d9556 100644 --- a/tests/nemo_text_processing/en/data_text_normalization/test_cases_whitelist.txt +++ b/tests/nemo_text_processing/en/data_text_normalization/test_cases_whitelist.txt @@ -1,3 +1,6 @@ Dr. Evil~doctor Evil Mrs. Norris~misses Norris dr. Evil~dr . Evil +DNA is~DNA is +C. S. Lewis~CS Lewis +tv~TV diff --git a/tests/nemo_text_processing/en/data_text_normalization/test_cases_word.txt b/tests/nemo_text_processing/en/data_text_normalization/test_cases_word.txt index f64eb54b93dc..d74a902a7729 100644 --- a/tests/nemo_text_processing/en/data_text_normalization/test_cases_word.txt +++ b/tests/nemo_text_processing/en/data_text_normalization/test_cases_word.txt @@ -40,3 +40,5 @@ $ and 5% or %~dollar and five percent or percent sign (1)Hello~( one ) Hello !1~! one 1!hello~one ! 
hello +love him while we may,~love him while we may , +mar~mar diff --git a/tutorials/text_processing/Inverse_Text_Normalization.ipynb b/tutorials/text_processing/Inverse_Text_Normalization.ipynb deleted file mode 100755 index 6883ddbeb95e..000000000000 --- a/tutorials/text_processing/Inverse_Text_Normalization.ipynb +++ /dev/null @@ -1,515 +0,0 @@ -{ - "nbformat": 4, - "nbformat_minor": 5, - "metadata": { - "colab": { - "name": "Inverse_Text_Normalization.ipynb", - "provenance": [], - "collapsed_sections": [] - }, - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.8" - } - }, - "cells": [ - { - "cell_type": "code", - "metadata": { - "id": "U1GACXvL5GhV" - }, - "source": [ - "if 'google.colab' in str(get_ipython()):\n", - " !pip install -q condacolab\n", - " import condacolab\n", - " condacolab.install()" - ], - "id": "U1GACXvL5GhV", - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "YxVLI-f97Kxl" - }, - "source": [ - "\"\"\"\n", - "You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.\n", - "\n", - "Instructions for setting up Colab are as follows:\n", - "1. Open a new Python 3 notebook.\n", - "2. Import this notebook from GitHub (File -> Upload Notebook -> \"GITHUB\" tab -> copy/paste GitHub URL)\n", - "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", - "\"\"\"\n", - "\n", - "BRANCH = 'r1.9.0'" - ], - "id": "YxVLI-f97Kxl", - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "TcWLxxXC7Jgx" - }, - "source": [ - "\n", - "# If you're using Google Colab and not running locally, run this cell.\n", - "# install NeMo\n", - "if 'google.colab' in str(get_ipython()):\n", - " !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]" - ], - "id": "TcWLxxXC7Jgx", - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "4nf8sui349co" - }, - "source": [ - "if 'google.colab' in str(get_ipython()):\n", - " !conda install -c conda-forge pynini=2.1.3\n", - " ! mkdir images\n", - " ! wget https://github.com/NVIDIA/NeMo/blob/$BRANCH/tutorials/text_processing/images/deployment.png -O images/deployment.png\n", - " ! wget https://github.com/NVIDIA/NeMo/blob/$BRANCH/tutorials/text_processing/images/pipeline.png -O images/pipeline.png" - ], - "id": "4nf8sui349co", - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "homeless-richardson" - }, - "source": [ - "import os\n", - "import wget\n", - "import pynini\n", - "import nemo_text_processing" - ], - "id": "homeless-richardson", - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "interracial-listing" - }, - "source": [ - "# Task Description\n", - "\n", - "Inverse text normalization (ITN) is a part of the Automatic Speech Recognition (ASR) post-processing pipeline. \n", - "\n", - "ITN is the task of converting the raw spoken output of the ASR model into its written form to improve the text readability. 
For example, `in nineteen seventy five` should be changed to `in 1975` and `one hundred and twenty three dollars` to `$123`." - ], - "id": "interracial-listing" - }, - { - "cell_type": "markdown", - "metadata": { - "id": "desirable-words" - }, - "source": [ - "# NeMo Inverse Text Normalization\n", - "\n", - "NeMo ITN is based on weighted finite-state\n", - "transducer (WFST) grammars. The tool uses [`Pynini`](https://github.com/kylebgorman/pynini) to construct WFSTs, and the created grammars can be exported and integrated into [`Sparrowhawk`](https://github.com/google/sparrowhawk) (an open-source version of [The Kestrel TTS text normalization system](https://www.cambridge.org/core/journals/natural-language-engineering/article/abs/kestrel-tts-text-normalization-system/F0C18A3F596B75D83B75C479E23795DA)) for production. The NeMo ITN tool can be seen as a Python extension of `Sparrowhawk`. \n", - "\n", - "Currently, NeMo ITN provides support for English and the following semiotic classes from the [Google Text normalization dataset](https://www.kaggle.com/richardwilliamsproat/text-normalization-for-english-russian-and-polish):\n", - "DATE, CARDINAL, MEASURE, DECIMAL, ORDINAL, MONEY, TIME, PLAIN. \n", - "We additionally added the class `WHITELIST` for all whitelisted tokens whose verbalizations are directly looked up from a user-defined list.\n", - "\n", - "The toolkit is modular, easily extendable, and can be adapted to other languages and tasks like [text normalization](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/text_processing/Text_Normalization.ipynb). The Python environment enables an easy combination of text covering grammars with NNs. \n", - "\n", - "The rule-based system is divided into a classifier and a verbalizer following [Google's Kestrel](https://www.researchgate.net/profile/Richard_Sproat/publication/277932107_The_Kestrel_TTS_text_normalization_system/links/57308b1108aeaae23f5cc8c4/The-Kestrel-TTS-text-normalization-system.pdf) design: the classifier is responsible for detecting and classifying semiotic classes in the underlying text, and the verbalizer then verbalizes the detected text segment. \n", - "\n", - "The overall NeMo ITN pipeline from development in `Pynini` to deployment in `Sparrowhawk` is shown below:\n", - "![alt text](images/deployment.png \"Inverse Text Normalization Pipeline\")" - ], - "id": "desirable-words" - }, - { - "cell_type": "markdown", - "metadata": { - "id": "military-radius" - }, - "source": [ - "# Quick Start\n", - "\n", - "## Add ITN to your Python ASR post-processing workflow\n", - "\n", - "ITN is a part of the `nemo_text_processing` package which is installed with `nemo_toolkit`. Installation instructions can be found [here](https://github.com/NVIDIA/NeMo/tree/main/README.rst). A toy sketch of the classifier/verbalizer split described above follows."
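Before running the cells below, it can help to see the classifier/verbalizer split from the section above in miniature. The following is a minimal sketch in plain `pynini`; the toy `twenty` -> `20` mapping and the exact tag layout are illustrative assumptions for demonstration, not NeMo's shipped grammars:

```python
import pynini
from pynini.lib import pynutil

digit = pynini.union(*"0123456789")

# Classifier (tagger): convert the spoken cardinal and wrap it in a class token.
# The token fields mirror the `cardinal { integer: ... }` syntax described above.
classifier = (
    pynutil.insert('tokens { cardinal { integer: "')
    + pynini.cross("twenty", "20")  # toy mapping, assumed for illustration
    + pynutil.insert('" } }')
)

# Verbalizer: delete the tag scaffolding and keep the rendered integer.
verbalizer = (
    pynutil.delete('tokens { cardinal { integer: "')
    + pynini.closure(digit, 1)
    + pynutil.delete('" } }')
)

tagged = pynini.shortestpath("twenty" @ classifier).string()
print(tagged)                                             # tokens { cardinal { integer: "20" } }
print(pynini.shortestpath(tagged @ verbalizer).string())  # 20
```

In the real pipeline the classifier and verbalizer are large unions over all semiotic classes, but the tag-insert/tag-delete pattern is the same.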
- ], - "id": "military-radius" - }, - { - "cell_type": "code", - "metadata": { - "id": "limiting-genesis" - }, - "source": [ - "from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer\n", - "\n", - "inverse_normalizer = InverseNormalizer(lang='en')\n", - "\n", - "raw_text = \"we paid one hundred and twenty three dollars for this desk, and this.\"\n", - "inverse_normalizer.inverse_normalize(raw_text, verbose=False)" - ], - "id": "limiting-genesis", - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "downtown-inventory" - }, - "source": [ - "In the above cell, `one hundred and twenty three dollars` would be converted to `$123`, and the rest of the words remain the same.\n", - "\n", - "## Run Inverse Text Normalization on an input from a file\n", - "\n", - "Use `run_predict.py` to convert a spoken text from a file `INPUT_FILE` to a written format and save the output to `OUTPUT_FILE`. Under the hood, `run_predict.py` is calling `inverse_normalize()` (see the above section)." - ], - "id": "downtown-inventory" - }, - { - "cell_type": "code", - "metadata": { - "id": "streaming-butterfly" - }, - "source": [ - "# If you're running the notebook locally, update the NEMO_TEXT_PROCESSING_PATH below\n", - "# In Colab, a few required scripts will be downloaded from NeMo github\n", - "\n", - "NEMO_TOOLS_PATH = '/nemo_text_processing/inverse_text_normalization'\n", - "DATA_DIR = 'data_dir'\n", - "os.makedirs(DATA_DIR, exist_ok=True)\n", - "\n", - "if 'google.colab' in str(get_ipython()):\n", - " NEMO_TOOLS_PATH = '.'\n", - "\n", - " required_files = ['run_predict.py',\n", - " 'run_evaluate.py']\n", - " for file in required_files:\n", - " if not os.path.exists(file):\n", - " file_path = 'https://raw.githubusercontent.com/NVIDIA/NeMo/' + BRANCH + '/nemo_text_processing/inverse_text_normalization/' + file\n", - " print(file_path)\n", - " wget.download(file_path)\n", - "elif not os.path.exists(NEMO_TOOLS_PATH):\n", - " raise ValueError(f'update path to NeMo root directory')\n", - "\n", - "INPUT_FILE = f'{DATA_DIR}/test.txt'\n", - "OUTPUT_FILE = f'{DATA_DIR}/test_itn.txt'\n", - "\n", - "! echo \"on march second twenty twenty\" > $DATA_DIR/test.txt\n", - "! python $NEMO_TOOLS_PATH/run_predict.py --input=$INPUT_FILE --output=$OUTPUT_FILE --language='en'" - ], - "id": "streaming-butterfly", - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "cordless-louisiana" - }, - "source": [ - "# check that the raw text was indeed converted to the written form\n", - "! cat $OUTPUT_FILE" - ], - "id": "cordless-louisiana", - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "italic-parish" - }, - "source": [ - "## Run evaluation\n", - "\n", - "[Google Text normalization dataset](https://www.kaggle.com/richardwilliamsproat/text-normalization-for-english-russian-and-polish) consists of 1.1 billion words of English text from Wikipedia, divided across 100 files. The normalized text is obtained with [The Kestrel TTS text normalization system](https://www.cambridge.org/core/journals/natural-language-engineering/article/abs/kestrel-tts-text-normalization-system/F0C18A3F596B75D83B75C479E23795DA)).\n", - "\n", - "Although a large fraction of this dataset can be reused for ITN by swapping input with output, the dataset is not bijective. 
\n", - "\n", - "For example: `1,000 -> one thousand`, `1000 -> one thousand`, `3:00pm -> three p m`, `3 pm -> three p m` are valid data samples for normalization but the inverse does not hold for ITN. \n", - "\n", - "We used regex rules to disambiguate samples where possible, see `nemo_text_processing/inverse_text_normalization/clean_eval_data.py`.\n", - "\n", - "To run evaluation, the input file should follow the Google Text normalization dataset format. That is, every line of the file needs to have the format `\\t\\t` if it's trivial class or `\\t\\t` in case of a semiotic class.\n", - "\n", - "Example evaluation run: \n", - "\n", - "`python run_evaluate.py \\\n", - " --input=./en_with_types/output-00001-of-00100 \\\n", - " [--language LANGUAGE] \\\n", - " [--cat CATEGORY] \\\n", - " [--filter]`\n", - " \n", - " \n", - "Use `--cat` to specify a `CATEGORY` to run evaluation on (all other categories are going to be excluded from evaluation). With the option `--filter`, the provided data will be cleaned to avoid disambiguates (use `clean_eval_data.py` to clean up the data upfront)." - ], - "id": "italic-parish" - }, - { - "cell_type": "code", - "metadata": { - "id": "intimate-astronomy" - }, - "source": [ - "eval_text = \"\"\"PLAIN\\ton\\t\n", - "DATE\\t22 july 2012\\tthe twenty second of july twenty twelve\n", - "PLAIN\\tthey\\t\n", - "PLAIN\\tworked\\t\n", - "PLAIN\\tuntil\\t\n", - "TIME\\t12:00\\ttwelve o'clock\n", - "\\t\n", - "\"\"\"\n", - "\n", - "INPUT_FILE_EVAL = f'{DATA_DIR}/test_eval.txt'\n", - "\n", - "with open(INPUT_FILE_EVAL, 'w') as f:\n", - " f.write(eval_text)\n", - "! cat $INPUT_FILE_EVAL" - ], - "id": "intimate-astronomy", - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "corporate-contest" - }, - "source": [ - "! python $NEMO_TOOLS_PATH/run_evaluate.py --input=$INPUT_FILE_EVAL --language='en'" - ], - "id": "corporate-contest", - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "raised-exemption" - }, - "source": [ - "`run_evaluate.py` call will output both **sentence level** and **token level** accuracies. \n", - "For our example, the expected output is the following:\n", - "\n", - "```\n", - "Loading training data: data_dir/test_eval.txt\n", - "Sentence level evaluation...\n", - "- Data: 1 sentences\n", - "100% 1/1 [00:00<00:00, 58.42it/s]\n", - "- Denormalized. Evaluating...\n", - "- Accuracy: 1.0\n", - "Token level evaluation...\n", - "- Token type: PLAIN\n", - " - Data: 4 tokens\n", - "100% 4/4 [00:00<00:00, 504.73it/s]\n", - " - Denormalized. Evaluating...\n", - " - Accuracy: 1.0\n", - "- Token type: DATE\n", - " - Data: 1 tokens\n", - "100% 1/1 [00:00<00:00, 118.95it/s]\n", - " - Denormalized. Evaluating...\n", - " - Accuracy: 1.0\n", - "- Token type: TIME\n", - " - Data: 1 tokens\n", - "100% 1/1 [00:00<00:00, 230.44it/s]\n", - " - Denormalized. 
Evaluating...\n", - " - Accuracy: 1.0\n", - "- Accuracy: 1.0\n", - " - Total: 6 \n", - "\n", - "Class | Num Tokens | Denormalization\n", - "sent level | 1 | 1.0 \n", - "PLAIN | 4 | 1.0 \n", - "DATE | 1 | 1.0 \n", - "CARDINAL | 0 | 0 \n", - "LETTERS | 0 | 0 \n", - "VERBATIM | 0 | 0 \n", - "MEASURE | 0 | 0 \n", - "DECIMAL | 0 | 0 \n", - "ORDINAL | 0 | 0 \n", - "DIGIT | 0 | 0 \n", - "MONEY | 0 | 0 \n", - "TELEPHONE | 0 | 0 \n", - "ELECTRONIC | 0 | 0 \n", - "FRACTION | 0 | 0 \n", - "TIME | 1 | 1.0 \n", - "ADDRESS | 0 | 0 \n", - "```" - ], - "id": "raised-exemption" - }, - { - "cell_type": "markdown", - "metadata": { - "id": "imported-literacy" - }, - "source": [ - "# C++ deployment\n", - "\n", - "The instructions on how to export `Pynini` grammars and to run them with `Sparrowhawk`, could be found at [NeMo/tools/text_processing_deployment](https://github.com/NVIDIA/NeMo/tree/main/tools/text_processing_deployment)." - ], - "id": "imported-literacy" - }, - { - "cell_type": "markdown", - "metadata": { - "id": "bronze-nerve" - }, - "source": [ - "# WFST and Common Pynini Operations\n", - "\n", - "Finite-state acceptor (or FSA) is a finite state automaton that has a finite number of states and no output. FSA either accepts (when the matching patter is found) or rejects a string (no match is found). " - ], - "id": "bronze-nerve" - }, - { - "cell_type": "code", - "metadata": { - "id": "heavy-distance" - }, - "source": [ - "print([byte for byte in bytes('fst', 'utf-8')])\n", - "\n", - "# create an acceptor from a string\n", - "pynini.accep('fst')" - ], - "id": "heavy-distance", - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "brave-avatar" - }, - "source": [ - "Here `0` - is a start note, `1` and `2` are the accept nodes, while `3` is a finite state.\n", - "By default (token_type=\"byte\", `Pynini` interprets the string as a sequence of bytes, assigning one byte per arc. \n", - "\n", - "A finite state transducer (FST) not only matches the pattern but also produces output according to the defined transitions." - ], - "id": "brave-avatar" - }, - { - "cell_type": "code", - "metadata": { - "id": "paperback-female" - }, - "source": [ - "# create an FST\n", - "pynini.cross('fst', 'FST')" - ], - "id": "paperback-female", - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "blond-hypothetical" - }, - "source": [ - "Pynini supports the following operations:\n", - "\n", - "- `closure` - Computes concatenative closure.\n", - "- `compose` - Constructively composes two FSTs.\n", - "- `concat` - Computes the concatenation (product) of two FSTs.\n", - "- `difference` - Constructively computes the difference of two FSTs.\n", - "- `invert` - Inverts the FST's transduction.\n", - "- `optimize` - Performs a generic optimization of the FST.\n", - "- `project` - Converts the FST to an acceptor using input or output labels.\n", - "- `shortestpath` - Construct an FST containing the shortest path(s) in the input FST.\n", - "- `union`- Computes the union (sum) of two or more FSTs.\n", - "\n", - "\n", - "The list of most commonly used `Pynini` operations could be found [https://github.com/kylebgorman/pynini/blob/master/CHEATSHEET](https://github.com/kylebgorman/pynini/blob/master/CHEATSHEET). 
\n", - "\n", - "Pynini examples could be found at [https://github.com/kylebgorman/pynini/tree/master/pynini/examples](https://github.com/kylebgorman/pynini/tree/master/pynini/examples).\n", - "Use `help()` to explore the functionality. For example:" - ], - "id": "blond-hypothetical" - }, - { - "cell_type": "code", - "metadata": { - "id": "arctic-firewall" - }, - "source": [ - "help(pynini.union)" - ], - "id": "arctic-firewall", - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "continued-optimum" - }, - "source": [ - "# NeMo ITN API" - ], - "id": "continued-optimum" - }, - { - "cell_type": "markdown", - "metadata": { - "id": "twenty-terrorist" - }, - "source": [ - "NeMo ITN defines the following APIs that are called in sequence:\n", - "\n", - "- `find_tags() + select_tag()` - creates a linear automaton from the input string and composes it with the final classification WFST, which transduces numbers and inserts semantic tags. \n", - "- `parse()` - parses the tagged string into a list of key-value items representing the different semiotic tokens.\n", - "- `generate_permutations()` - takes the parsed tokens and generates string serializations with different reorderings of the key-value items. This is important since WFSTs can only process input linearly, but the word order can change from spoken to written form (e.g., `three dollars -> $3`). \n", - "- `find_verbalizer() + select_verbalizer` - takes the intermediate string representation and composes it with the final verbalization WFST, which removes the tags and returns the written form. \n", - "\n", - "![alt text](images/pipeline.png \"Inverse Text Normalization Pipeline\")" - ], - "id": "twenty-terrorist" - }, - { - "cell_type": "markdown", - "metadata": { - "id": "twenty-charles" - }, - "source": [ - "# References and Further Reading:\n", - "\n", - "\n", - "- [Zhang, Yang, Bakhturina, Evelina, Gorman, Kyle and Ginsburg, Boris. \"NeMo Inverse Text Normalization: From Development To Production.\" (2021)](https://arxiv.org/abs/2104.05055)\n", - "- [Ebden, Peter, and Richard Sproat. \"The Kestrel TTS text normalization system.\" Natural Language Engineering 21.3 (2015): 333.](https://www.cambridge.org/core/journals/natural-language-engineering/article/abs/kestrel-tts-text-normalization-system/F0C18A3F596B75D83B75C479E23795DA)\n", - "- [Gorman, Kyle. \"Pynini: A Python library for weighted finite-state grammar compilation.\" Proceedings of the SIGFSM Workshop on Statistical NLP and Weighted Automata. 2016.](https://www.aclweb.org/anthology/W16-2409.pdf)\n", - "- [Mohri, Mehryar, Fernando Pereira, and Michael Riley. \"Weighted finite-state transducers in speech recognition.\" Computer Speech & Language 16.1 (2002): 69-88.](https://cs.nyu.edu/~mohri/postscript/csl01.pdf)" - ], - "id": "twenty-charles" - } - ] -} \ No newline at end of file diff --git a/tutorials/text_processing/README.md b/tutorials/text_processing/README.md deleted file mode 100644 index 07e4ac0ea5b5..000000000000 --- a/tutorials/text_processing/README.md +++ /dev/null @@ -1,24 +0,0 @@ -# NeMo Text Processing Tutorials - -The NeMo Text Processing module provides support for both Text Normalization (TN) and -Inverse Text Normalization (ITN) in order to aid upstream and downstream text processing. -The included tutorials are intended to help you quickly become familiar with the interface -of the module, as well as guiding you in creating and deploying your own grammars for individual -text processing needs. 
- -If you wish to learn more about how to use NeMo for Text Normalization tasks (e.g. conversion -of symbolic strings to verbal form - such as `15` -> "fifteen"), please see the [`Text Normalization`](https://colab.research.google.com/github/NVIDIA/NeMo/blob/stable/tutorials/text_processing/Text_Normalization.ipynb) -tutorial. - -If you wish to learn more about Inverse Text Normalization - the inverse task of converting -from verbalized strings to symbolic written form, as may be encountered in downstream ASR - -consult the [`Inverse Text Normalization`](https://colab.research.google.com/github/NVIDIA/NeMo/blob/stable/tutorials/text_processing/Inverse_Text_Normalization.ipynb) tutorial. - -For those curious about constructing grammars tailored to specific languages and use cases, -you may be interested in working through the [`WFST Tutorial`](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/text_processing/WFST_Tutorial.ipynb), which goes through NeMo's Normalization -process in detail. - -As NeMo Text Processing utilizes Weighted Finite State Transducer (WFST) graphs to construct its -grammars, a working knowledge of [Finite State Automata](https://en.wikipedia.org/wiki/Finite-state_machine) (FSA) and/or regular languages is suggested. -Further, we recommend becoming functionally familiar with the [`pynini` library](https://www.openfst.org/twiki/bin/view/GRM/Pynini) - which functions -as the backend for graph construction - and [Sparrowhawk](https://github.com/google/sparrowhawk) - which NeMo utilizes for grammar deployment. \ No newline at end of file diff --git a/tutorials/text_processing/Text_(Inverse)_Normalization.ipynb b/tutorials/text_processing/Text_(Inverse)_Normalization.ipynb new file mode 100755 index 000000000000..2f9dc0c7f8aa --- /dev/null +++ b/tutorials/text_processing/Text_(Inverse)_Normalization.ipynb @@ -0,0 +1,444 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "a5fA5qAm5Afg" + }, + "outputs": [], + "source": [ + "\"\"\"\n", + "You can run either this notebook locally or on Google Colab.\n", + "\n", + "Instructions for setting up Colab are as follows:\n", + "1. Open a new Python 3 notebook.\n", + "2. Import this notebook from GitHub (File -> Upload Notebook -> \"GITHUB\" tab -> copy/paste GitHub URL)\n", + "3. Optional: Restart the runtime (Runtime -> Restart Runtime) for any upgraded packages to take effect\n", + "\"\"\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "> **_NOTE:_** Find the official NeMo documentation at \n", + "https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/nlp/text_normalization/wfst/intro.html " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Overview\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "F-IrnmXMTevr" + }, + "source": [ + "A sentence can be split up into semiotic tokens stemming from a variety of classes, where the spoken form differs from the written form. Examples are *dates*, *decimals*, *cardinals*, *measures*, etc. A good TN or ITN system will be able to handle a variety of **semiotic classes**." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-IT1Xr9iW2Xr" + }, + "source": [ + "# How to use\n", + "## 1. 
Installation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "## Install NeMo, which installs both nemo and nemo_text_processing package\n", + "BRANCH = 'r1.9.0'\n", + "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# try to import of nemo_text_processing an other dependencies\n", + "import nemo_text_processing\n", + "import os" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2. Text Normalization" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Bfs7fa9lXDDh" + }, + "outputs": [], + "source": [ + "# create text normalization instance that works on cased input\n", + "from nemo_text_processing.text_normalization.normalize import Normalizer\n", + "normalizer = Normalizer(input_case='cased', lang='en')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# the normalizer class offers the following parameterization. \n", + "print(normalizer.__doc__)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "> **_NOTE:_** Standard Text Normalization uses `determistic=True`, outputting a single output for a given input string\n", + "\n", + "\n", + "\n", + "#### 2.1 Run TN on input string" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Normalizer.normalize() offers the following parameterization\n", + "print(normalizer.normalize.__doc__)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# run normalization on example string input\n", + "written = \"We paid $123 for this desk.\"\n", + "normalized = normalizer.normalize(written, verbose=True, punct_post_process=True)\n", + "print(normalized)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "intermediate semtiotic class information is shown if verbose=True." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "### 2.1 Run TN on list of input strings" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "UD-OuFmEOX3T" + }, + "outputs": [], + "source": [ + "# create temporary data folder and example input file\n", + "DATA_DIR = 'tmp_data_dir'\n", + "os.makedirs(DATA_DIR, exist_ok=True)\n", + "INPUT_FILE = f'{DATA_DIR}/inference.txt'\n", + "! echo -e 'The alarm went off at 10:00a.m. \\nI received $123' > $INPUT_FILE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "d4T0gXHwY3JZ" + }, + "outputs": [], + "source": [ + "# check input file was properly created\n", + "! 
cat $INPUT_FILE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# load input file into 'data' - a list of strings\n", + "data = []\n", + "with open(INPUT_FILE, 'r') as fp:\n", + " for line in fp:\n", + " data.append(line.strip())\n", + "data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "F5wSJTI8ZFRg" + }, + "outputs": [], + "source": [ + "# run normalization on 'data'\n", + "normalizer.normalize_list(data, punct_post_process=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "RMT5lkPYzZHK" + }, + "source": [ + "### 2.2 Evaluate TN on written-normalized text pairs \n", + "\n", + "The evaluation data needs to have the following format:\n", + "\n", + "'on 22 july 2022 they worked until 12:00' and the normalization is represented as " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# example evaluation sentence\n", + "eval_text = \"\"\"PLAIN\\ton\\t\n", + "DATE\\t22 july 2012\\tthe twenty second of july twenty twelve\n", + "PLAIN\\tthey\\t\n", + "PLAIN\\tworked\\t\n", + "PLAIN\\tuntil\\t\n", + "TIME\\t12:00\\ttwelve o'clock\n", + "\\t\n", + "\"\"\"\n", + "EVAL_FILE = f'{DATA_DIR}/eval.txt'\n", + "with open(EVAL_FILE, 'w') as fp:\n", + " fp.write(eval_text)\n", + "! cat $EVAL_FILE" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "RMT5lkPYzZHK" + }, + "source": [ + "That is, every sentence is broken into semiotic tokens line by line and concluded by end of sentence token ``. In case of a plain token it's `[SEMIOTIC CLASS] [TAB] [WRITTEN] [TAB] `, otherwise `[SEMIOTIC CLASS] [TAB] [WRITTEN] [TAB] [NORMALIZED]`.\n", + "This format was introduced in [Google Text normalization dataset](https://arxiv.org/abs/1611.00068). " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Parse evaluation file into written and normalized sentence pairs\n", + "from nemo_text_processing.text_normalization.data_loader_utils import load_files, training_data_to_sentences\n", + "eval_data = load_files([EVAL_FILE])\n", + "sentences_un_normalized, sentences_normalized, sentences_class_types = training_data_to_sentences(eval_data)\n", + "print(list(zip(sentences_un_normalized, sentences_normalized)))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# run prediction\n", + "sentences_prediction = normalizer.normalize_list(sentences_un_normalized)\n", + "print(sentences_prediction)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# measure sentence accuracy\n", + "from nemo_text_processing.text_normalization.data_loader_utils import evaluate\n", + "sentences_accuracy = evaluate(\n", + " preds=sentences_prediction, labels=sentences_normalized, input=sentences_un_normalized\n", + " )\n", + "print(\"- Accuracy: \" + str(sentences_accuracy))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3. Inverse Text Normalization\n", + "ITN supports equivalent API as TN. 
Here we only show inverse normalization on an input string" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# create inverse text normalization instance\n", + "from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer\n", + "inverse_normalizer = InverseNormalizer(lang='en')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# run ITN on example string input\n", + "spoken = \"we paid one hundred twenty three dollars for this desk\"\n", + "un_normalized = inverse_normalizer.inverse_normalize(spoken, verbose=True)\n", + "print(un_normalized)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4. Audio-based Text Normalization\n", + "Audio-based text normalization uses extended [WFST](https://en.wikipedia.org/wiki/Finite-state_machine) grammars to provide a range of possible normalization options.\n", + "The following example shows the workflow (Disclaimer: the exact values in the graphic are illustrative and need not match the real system's behavior):\n", + "1. the text \"627\" is sent to the extended TN WFST grammar\n", + "2. the grammar outputs 5 different verbalization options based on the text input alone\n", + "3. if an audio file is provided, we compare the audio transcript with the verbalization options to determine which normalization is correct, based on character error rate. The transcript is generated using a pretrained NeMo ASR model. \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The following shows an example of how to generate multiple normalization options:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# import non-deterministic WFST-based TN module\n", + "from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# initialize normalizer, this may take some time to generate the extended grammars. \n", + "# Thus, we recommend caching the grammars by specifying a cache directory\n", + "normalizer = NormalizerWithAudio(\n", + " lang=\"en\",\n", + " input_case=\"cased\",\n", + " overwrite_cache=False,\n", + " cache_dir=\"cache_dir\",\n", + " )\n", + "# create up to 10 normalization options\n", + "print(normalizer.normalize(\"123\", n_tagged=10, punct_post_process=True))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 5. Parallel execution\n", + "\n", + "`Normalizer.normalize()` as well as `InverseNormalizer.inverse_normalize()` are functions without side effects.\n", + "Thus, if you need to normalize large numbers of input examples, these can be executed in parallel." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ENMDNl9C4TkF" + }, + "source": [ + "# Tutorial on how to customize grammars\n", + "\n", + "https://colab.research.google.com/github/NVIDIA/NeMo/blob/stable/tutorials/text_processing/WFST_Tutorial.ipynb\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "lcvT3P2lQ_GS" + }, + "source": [ + "# References and Further Reading:\n", + "\n", + "\n", + "- [Zhang, Yang, Bakhturina, Evelina, Gorman, Kyle and Ginsburg, Boris. 
\"NeMo Inverse Text Normalization: From Development To Production.\" (2021)](https://arxiv.org/abs/2104.05055)\n", + "- [Ebden, Peter, and Richard Sproat. \"The Kestrel TTS text normalization system.\" Natural Language Engineering 21.3 (2015): 333.](https://www.cambridge.org/core/journals/natural-language-engineering/article/abs/kestrel-tts-text-normalization-system/F0C18A3F596B75D83B75C479E23795DA)\n", + "- [Gorman, Kyle. \"Pynini: A Python library for weighted finite-state grammar compilation.\" Proceedings of the SIGFSM Workshop on Statistical NLP and Weighted Automata. 2016.](https://www.aclweb.org/anthology/W16-2409.pdf)\n", + "- [Mohri, Mehryar, Fernando Pereira, and Michael Riley. \"Weighted finite-state transducers in speech recognition.\" Computer Speech & Language 16.1 (2002): 69-88.](https://cs.nyu.edu/~mohri/postscript/csl01.pdf)" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [ + "lcvT3P2lQ_GS" + ], + "name": "Text_Normalization_Tutorial.ipynb", + "private_outputs": true, + "provenance": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.7" + } + }, + "nbformat": 4, + "nbformat_minor": 1 +} diff --git a/tutorials/text_processing/Text_Normalization.ipynb b/tutorials/text_processing/Text_Normalization.ipynb deleted file mode 100755 index e3273ad1738c..000000000000 --- a/tutorials/text_processing/Text_Normalization.ipynb +++ /dev/null @@ -1,395 +0,0 @@ -{ - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "accelerator": "GPU", - "colab": { - "name": "Text_Normalization_Tutorial.ipynb", - "private_outputs": true, - "provenance": [], - "collapsed_sections": [ - "lcvT3P2lQ_GS" - ], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.8" - } - }, - "cells": [ - { - "cell_type": "code", - "metadata": { - "id": "a5fA5qAm5Afg" - }, - "source": [ - "if 'google.colab' in str(get_ipython()):\n", - " !pip install -q condacolab\n", - " import condacolab\n", - " condacolab.install()" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "x0DJqotopcyb", - "collapsed": true - }, - "source": [ - "\"\"\"\n", - "You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.\n", - "\n", - "Instructions for setting up Colab are as follows:\n", - "1. Open a new Python 3 notebook.\n", - "2. Import this notebook from GitHub (File -> Upload Notebook -> \"GITHUB\" tab -> copy/paste GitHub URL)\n", - "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", - "4. 
Run this cell to set up dependencies.\n", - "\"\"\"\n", - "# If you're using Google Colab and not running locally, run this cell\n", - "\n", - "# install NeMo\n", - "BRANCH = 'r1.9.0'\n", - "if 'google.colab' in str(get_ipython()):\n", - " !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "nYsp3SH24Tj_" - }, - "source": [ - "if 'google.colab' in str(get_ipython()):\n", - " ! conda install -c conda-forge pynini=2.1.3\n", - " ! mkdir images\n", - " ! wget https://github.com/NVIDIA/NeMo/blob/$BRANCH/tutorials/text_processing/images/deployment.png -O images/deployment.png\n", - " ! wget https://github.com/NVIDIA/NeMo/blob/$BRANCH/tutorials/text_processing/images/pipeline.png -O images/pipeline.png" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "CH7yR7cSwPKr" - }, - "source": [ - "import os\n", - "import wget\n", - "import pynini\n", - "import nemo_text_processing\n" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "F-IrnmXMTevr" - }, - "source": [ - "# Task Description\n", - "\n", - "Text normalization (TN) is a part of the Text-To-Speech (TTS) pre-processing pipeline. It could also be used for pre-processing Automatic Speech Recognition (ASR) training transcripts.\n", - "\n", - "TN is the task of converting text in written form to its spoken form to improve TTS. For example, `10:00` should be changed to `ten o'clock` and `10kg` to `ten kilograms`." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "xXRARM8XtK_g" - }, - "source": [ - "# NeMo Text Normalization\n", - "\n", - "NeMo TN is based on weighted finite-state\n", - "transducer (WFST) grammars. The tool uses [`Pynini`](https://github.com/kylebgorman/pynini) to construct WFSTs, and the created grammars can be exported and integrated into [`Sparrowhawk`](https://github.com/google/sparrowhawk) (an open-source version of [The Kestrel TTS text normalization system](https://www.cambridge.org/core/journals/natural-language-engineering/article/abs/kestrel-tts-text-normalization-system/F0C18A3F596B75D83B75C479E23795DA)) for production. The NeMo TN tool can be seen as a Python extension of `Sparrowhawk`. \n", - "\n", - "Currently, NeMo TN provides support for English and the following semiotic classes from the [Google Text normalization dataset](https://www.kaggle.com/richardwilliamsproat/text-normalization-for-english-russian-and-polish):\n", - "DATE, CARDINAL, MEASURE, DECIMAL, ORDINAL, MONEY, TIME, TELEPHONE, ELECTRONIC, PLAIN. We additionally added the class `WHITELIST` for all whitelisted tokens whose verbalizations are directly looked up from a user-defined list.\n", - "\n", - "The toolkit is modular, easily extendable, and can be adapted to other languages and tasks like [inverse text normalization](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/text_processing/Inverse_Text_Normalization.ipynb). The Python environment enables an easy combination of text covering grammars with NNs. 
\n", - "\n", - "The rule-based system is divided into a classifier and a verbalizer following [Google's Kestrel](https://www.researchgate.net/profile/Richard_Sproat/publication/277932107_The_Kestrel_TTS_text_normalization_system/links/57308b1108aeaae23f5cc8c4/The-Kestrel-TTS-text-normalization-system.pdf) design: the classifier is responsible for detecting and classifying semiotic classes in the underlying text, the verbalizer the verbalizes the detected text segment. \n", - "In the example `The alarm goes off at 10:30 a.m.`, the tagger for TIME detects `10:30 a.m.` as a valid time data with `hour=10`, `minutes=30`, `suffix=a.m.`, the verbalizer then turns this into `ten thirty a m`.\n", - "\n", - "The overall NeMo TN pipeline from development in `Pynini` to deployment in `Sparrowhawk` is shown below (example for ITN):\n", - "![alt text](images/deployment.png \"Inverse Text Normalization Pipeline\")\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "-IT1Xr9iW2Xr" - }, - "source": [ - "# Quick Start\n", - "\n", - "## Add TN to your Python TTS pre-processing workflow\n", - "\n", - "TN is a part of the `nemo_text_processing` package which is installed with `nemo_toolkit`. Installation instructions could be found [here](https://github.com/NVIDIA/NeMo/tree/main/README.rst)." - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "Bfs7fa9lXDDh" - }, - "source": [ - "from nemo_text_processing.text_normalization.normalize import Normalizer\n", - "# creates normalizer object that works on lower cased input\n", - "normalizer = Normalizer(input_case='cased', lang='en')\n", - "raw_text = \"We paid $123 for this desk.\"\n", - "normalizer.normalize(raw_text, verbose=False)" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "w5sX0SXbXoZp" - }, - "source": [ - "In the above cell, `$123` would be converted to `one hundred twenty three dollars`, and the rest of the words remain the same.\n", - "\n", - "## Run Text Normalization on an input from a file\n", - "\n", - "Use `run_predict.py` to convert a written format from a file `INPUT_FILE` to a spoken text and save the output to `OUTPUT_FILE`. Under the hood, `run_predict.py` is calling `normalize()` (see the above section)." - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "UD-OuFmEOX3T" - }, - "source": [ - "# If you're running the notebook locally, update the NEMO_TEXT_PROCESSING_PATH below\n", - "# In Colab, a few required scripts will be downloaded from NeMo github\n", - "\n", - "NEMO_TOOLS_PATH = '/nemo_text_processing/text_normalization'\n", - "DATA_DIR = 'data_dir'\n", - "os.makedirs(DATA_DIR, exist_ok=True)\n", - "\n", - "if 'google.colab' in str(get_ipython()):\n", - " NEMO_TOOLS_PATH = '.'\n", - "\n", - " required_files = ['run_predict.py',\n", - " 'run_evaluate.py']\n", - " for file in required_files:\n", - " if not os.path.exists(file):\n", - " file_path = 'https://raw.githubusercontent.com/NVIDIA/NeMo/' + BRANCH + '/nemo_text_processing/text_normalization/' + file\n", - " print(file_path)\n", - " wget.download(file_path)\n", - "elif not os.path.exists(NEMO_TOOLS_PATH):\n", - " raise ValueError(f'update path to NeMo root directory')" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "d4T0gXHwY3JZ" - }, - "source": [ - "INPUT_FILE = f'{DATA_DIR}/test.txt'\n", - "OUTPUT_FILE = f'{DATA_DIR}/test_tn.txt'\n", - "\n", - "! echo \"The alarm went off at 10:00.\" > $DATA_DIR/test.txt\n", - "! 
cat $INPUT_FILE\n", - "! python $NEMO_TOOLS_PATH/run_predict.py --input=$INPUT_FILE --output=$OUTPUT_FILE --language='en'" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "F5wSJTI8ZFRg" - }, - "source": [ - "# check that the raw text was converted to the spoken form\n", - "! cat $OUTPUT_FILE" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "RMT5lkPYzZHK" - }, - "source": [ - "## Run evaluation\n", - "\n", - "[Google Text normalization dataset](https://www.kaggle.com/richardwilliamsproat/text-normalization-for-english-russian-and-polish) consists of 1.1 billion words of English text from Wikipedia, divided across 100 files. The normalized text is obtained with The Kestrel TTS text normalization system).\n", - "\n", - "To run evaluation, the input file should follow the Google Text normalization dataset format. That is, every line of the file needs to have the format `\\t\\t` if it's trivial class or `\\t\\t` in case of a semiotic class.\n", - "\n", - "\n", - "Example evaluation run:\n", - "\n", - "\n", - "`python run_evaluate.py \\\n", - " --input=./en_with_types/output-00001-of-00100 \\\n", - " [--language LANGUAGE] \\\n", - " [--input_case INPUT_CASE] \\\n", - " [--cat CATEGORY]`\n", - "\n", - "Use `--cat` to specify a `CATEGORY` to run evaluation on (all other categories are going to be excluded from evaluation). The option `--input_case` tells the algorithm that the input is either lower cased or cased.\n", - "\n", - "\n", - "\n" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "u4zjeVVv-UXR" - }, - "source": [ - "eval_text = \"\"\"PLAIN\\ton\\t\n", - "DATE\\t22 july 2012\\tthe twenty second of july twenty twelve\n", - "PLAIN\\tthey\\t\n", - "PLAIN\\tworked\\t\n", - "PLAIN\\tuntil\\t\n", - "TIME\\t12:00\\ttwelve o'clock\n", - "\\t\n", - "\"\"\"\n", - "INPUT_FILE_EVAL = f\"{DATA_DIR}/test_eval.txt\"\n", - "with open(INPUT_FILE_EVAL, 'w') as fp:\n", - " fp.write(eval_text)\n", - "! cat $INPUT_FILE_EVAL" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "G7_5oXpObizP" - }, - "source": [ - "! python $NEMO_TOOLS_PATH/run_evaluate.py --input=$INPUT_FILE_EVAL --language='en'" - ], - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "bIvKBwRcH_9W" - }, - "source": [ - "`run_evaluate.py` call will output both **sentence level** and **token level** accuracies. \n", - "For our example, the expected output is the following:\n", - "\n", - "```\n", - "Loading training data: data_dir/test_eval.txt\n", - "Sentence level evaluation...\n", - "- Data: 1 sentences\n", - "100% 1/1 [00:00<00:00, 14.24it/s]\n", - "- Normalized. Evaluating...\n", - "- Accuracy: 1.0\n", - "Token level evaluation...\n", - "- Token type: PLAIN\n", - " - Data: 4 tokens\n", - "100% 4/4 [00:00<00:00, 239.56it/s]\n", - " - Denormalized. Evaluating...\n", - " - Accuracy: 1.0\n", - "- Token type: DATE\n", - " - Data: 1 tokens\n", - "100% 1/1 [00:00<00:00, 33.69it/s]\n", - " - Denormalized. Evaluating...\n", - " - Accuracy: 1.0\n", - "- Token type: TIME\n", - " - Data: 1 tokens\n", - "100% 1/1 [00:00<00:00, 94.84it/s]\n", - " - Denormalized. 
Evaluating...\n", - " - Accuracy: 1.0\n", - "- Accuracy: 1.0\n", - " - Total: 6 \n", - "\n", - " - Total: 6 \n", - "\n", - "Class | Num Tokens | Normalization\n", - "sent level | 1 | 1.0 \n", - "PLAIN | 4 | 1.0 \n", - "DATE | 1 | 1.0 \n", - "CARDINAL | 0 | 0 \n", - "LETTERS | 0 | 0 \n", - "VERBATIM | 0 | 0 \n", - "MEASURE | 0 | 0 \n", - "DECIMAL | 0 | 0 \n", - "ORDINAL | 0 | 0 \n", - "DIGIT | 0 | 0 \n", - "MONEY | 0 | 0 \n", - "TELEPHONE | 0 | 0 \n", - "ELECTRONIC | 0 | 0 \n", - "FRACTION | 0 | 0 \n", - "TIME | 1 | 1.0 \n", - "ADDRESS | 0 | 0 \n", - "\n", - "```\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "L85ZaUJ_4TkF" - }, - "source": [ - "# C++ deployment\n", - "\n", - "The instructions on how to export `Pynini` grammars and to run them with `Sparrowhawk`, could be found at [NeMo/tools/text_processing_deployment](https://github.com/NVIDIA/NeMo/tree/main/tools/text_processing_deployment)." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ENMDNl9C4TkF" - }, - "source": [ - "# WFST and Common Pynini Operations\n", - "\n", - "See [NeMo Text Inverse Normalization Tutorial](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/text_processing/Inverse_Text_Normalization.ipynb) for details." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "lcvT3P2lQ_GS" - }, - "source": [ - "# References and Further Reading:\n", - "\n", - "\n", - "- [Zhang, Yang, Bakhturina, Evelina, Gorman, Kyle and Ginsburg, Boris. \"NeMo Inverse Text Normalization: From Development To Production.\" (2021)](https://arxiv.org/abs/2104.05055)\n", - "- [Ebden, Peter, and Richard Sproat. \"The Kestrel TTS text normalization system.\" Natural Language Engineering 21.3 (2015): 333.](https://www.cambridge.org/core/journals/natural-language-engineering/article/abs/kestrel-tts-text-normalization-system/F0C18A3F596B75D83B75C479E23795DA)\n", - "- [Gorman, Kyle. \"Pynini: A Python library for weighted finite-state grammar compilation.\" Proceedings of the SIGFSM Workshop on Statistical NLP and Weighted Automata. 2016.](https://www.aclweb.org/anthology/W16-2409.pdf)\n", - "- [Mohri, Mehryar, Fernando Pereira, and Michael Riley. \"Weighted finite-state transducers in speech recognition.\" Computer Speech & Language 16.1 (2002): 69-88.](https://cs.nyu.edu/~mohri/postscript/csl01.pdf)" - ] - } - ] -} \ No newline at end of file diff --git a/tutorials/text_processing/WFST_Tutorial.ipynb b/tutorials/text_processing/WFST_Tutorial.ipynb index f714c6a6be54..981a2d5cef5c 100644 --- a/tutorials/text_processing/WFST_Tutorial.ipynb +++ b/tutorials/text_processing/WFST_Tutorial.ipynb @@ -1,7196 +1,7050 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "Qq1Hz6CKWdwl", - "outputId": "3d8f5bd6-f10e-431d-9039-eb88164fbb95" - }, - "outputs": [], - "source": [ - "### WARNING: This notebook will not work in a Colab environment. 
\n", - "\n", - "BRANCH= 'r1.9.0'\n", - "\n", - "!git clone -b $BRANCH https://github.com/NVIDIA/NeMo\n", - "%cd NeMo\n", - "!./reinstall.sh" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pynini\n", - "import nemo_text_processing\n", - "\n", - "from pynini.lib import pynutil" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, NEMO_DIGIT, delete_space, NEMO_SIGMA, NEMO_NOT_QUOTE, delete_extra_space, NEMO_NON_BREAKING_SPACE\n", - "from nemo_text_processing.text_normalization.normalize import Normalizer\n", - "\n", - "from nemo_text_processing.inverse_text_normalization.fr.taggers.cardinal import CardinalFst\n", - "from nemo_text_processing.inverse_text_normalization.fr.taggers.decimal import DecimalFst\n", - "from nemo_text_processing.inverse_text_normalization.fr.taggers.money import MoneyFst\n", - "from nemo_text_processing.inverse_text_normalization.fr.taggers.ordinal import OrdinalFst\n", - "from nemo_text_processing.inverse_text_normalization.fr.taggers.punctuation import PunctuationFst\n", - "from nemo_text_processing.inverse_text_normalization.fr.taggers.time import TimeFst\n", - "from nemo_text_processing.inverse_text_normalization.fr.taggers.whitelist import WhiteListFst\n", - "from nemo_text_processing.inverse_text_normalization.fr.taggers.word import WordFst\n", - "from nemo_text_processing.inverse_text_normalization.fr.verbalizers.cardinal import CardinalFst\n", - "from nemo_text_processing.inverse_text_normalization.fr.verbalizers.decimal import DecimalFst\n", - "from nemo_text_processing.inverse_text_normalization.fr.verbalizers.money import MoneyFst\n", - "from nemo_text_processing.inverse_text_normalization.fr.verbalizers.ordinal import OrdinalFst\n", - "from nemo_text_processing.inverse_text_normalization.fr.verbalizers.time import TimeFst\n", - "from nemo_text_processing.inverse_text_normalization.fr.verbalizers.whitelist import WhiteListFst\n", - "from nemo_text_processing.inverse_text_normalization.fr.verbalizers.word import WordFst\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "T0JxcvuPHvn9" - }, - "source": [ - "NeMo's Text Processing module uses Weighted Finite State Transducers (WFST) to deploy grammars for both efficient text normalization (TN) and inverse text normalization (ITN). In this tutorial, you will learn to build a normalization grammar from the ground up to use in your own text processing tasks. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Table of Contents\n", - "-
WFSTs\n", - "- NeMo Text Processing\n", - "- Getting Started\n", - "- Cardinal WFST\n", - "- Ordinal WFST\n", - "- Decimal WFST\n", - "- Money WFST\n", - "- Time WFST\n", - "- WhiteList WFST\n", - "- Word and Punctuation WFST\n", - "- Other Classes\n", - "- Tokenize and Classify\n", - "- Verbalize and Verbalize Final\n", - "- Deployment" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "lMUovcMsfXyI" - }, - "source": [ - "# WFSTs " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Y1ejNMLbH1jM" - }, - "source": [ - "WFSTs are a form of [Finite State Machines](https://en.wikipedia.org/wiki/Finite-state_machine) used to graph relations between regular languages (or [regular expressions](https://en.wikipedia.org/wiki/Regular_expression)). For our purposes, they can be defined by two major properties:\n", - "\n", - "1. Mappings between accepted input and output expressions for text substitution\n", - "2. Path weighting to direct graph traversal" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "nNg45ZuaP_A8" - }, - "source": [ - "For example, consider a simple normalization task of mapping the word \"cent\" (French for \"one hundred\") to the numerical representation `100`. We would begin with a Finite State representation of the regex `/cent/`:" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "uxo7gUkW_XKT" - }, - "source": [ - "![cent.png](images/cent.PNG)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "fahsjMVFlbCa" - }, - "source": [ - "And then create a mapping to the text string `100`:" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "IMJ-fNSk_jXC" - }, - "source": [ - "![cent_to_100.png](images/cent_to_100.PNG)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "bPKW0I4yAGUb" - }, - "source": [ - "*Note: Null characters are expressed as `ε` by convention*" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "_0NK3aW5nG9C" - }, - "source": [ - "This would give us a WFST with universal path weights. (By default, `pynini` uses [tropical semirings](https://en.wikipedia.org/wiki/Tropical_semiring) for arcs, giving each arc a default weight of `0`.)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "CzBc9D3qTGJ-" - }, - "source": [ - "Now, let us consider expanding our model. To indicate values between `100` and `200`, French uses the number scheme of `cent + digit`. For example, `120` would be pronounced as \"cent-vingt\". To create the appropriate output string, we would now want to map \"cent\" to `1` and the remaining aspect of our string to the appropriate digit representation." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "GRrKNQRjFDoL" - }, - "source": [ - "![cent_vingt_to_120.png](images/cent_vingt_to_120.PNG)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "jLpm4mufAfUz" - }, - "source": [ - "However this would make our graph [non-deterministic](https://en.wikipedia.org/wiki/Nondeterministic_algorithm) - it will have multiple possibilities for termination. Now an input of \"cent-vingt\" could have the outcome of `100` or `10020` when only one is correct. 
" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![cent_vingt_bad.png](images/cent_vingt_bad.PNG)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "c-GJTpgIAf7S" - }, - "source": [ - "To correct this, we may add a new end state and a weight to the path that accepts the input without `s`:" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "6GJcsdttGg_S" - }, - "source": [ - "![cent_vingt_good.png](images/cent_vingt_good.PNG)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "mHft1gzsAipc" - }, - "source": [ - "Now, we can guarantee an ideal mapping by relying on a shortest-path (smallest-weight) heuristic: traversal of the graph will prioritize longer inputs, only converting \"cent\" to `100` when a larger input isn't available. As such, we've now removed the undesired output `10020` while preserving our desired coverage in string mapping. \n", - "\n", - "This use of weights to ensure predictable behavior allows WFSTs to exploit the efficiency of standard graph traversal algorithms while also maintaining versatility. " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "8Ik4PBXafSSB" - }, - "source": [ - "# NeMo Text Processing " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "b2fcWKhqYVF5" - }, - "source": [ - "Following [Google's Kestrel](https://www.researchgate.net/publication/277932107_The_Kestrel_TTS_text_normalization_system) framework, NeMo deploys two composite WFSTs for text normalization. They are as follows:\n", - "1. A *classifier* (or tagger) to label potential tokens by 'semiotic class' (e.g. currency, ordinal number, street address)\n", - "2. A *verbalizer* to render a tagged token in conventional written form\n", - "\n", - "For example, consider the sentence: <>\n", - "\n", - "For an ITN task, a tokenizer would identify the following tokens:\n", - "\n", - "`[\"le\" ,\"premier\", \"juillet\", \"il\", \"a\", \"mangé\", \"trente-cinq\", \"pommes\"]`\n", - "\n", - "and provide each a class token: \n", - "\n", - "- `tokens { name: \"le\" }`\n", - "- `tokens { date { day: \"1\" month: \"juillet\" } } ` \n", - "- `tokens { name: \"il\" }` \n", - "- `tokens { name: \"a\" }` \n", - "- `tokens { name: \"mangé\" }`\n", - "- `tokens { cardinal { integer: \"35\" } }` \n", - "- `tokens { name: \"pommes\" }`\n", - "\n", - "These tokens are then passed to a 'verbalizer' WFST, which renders each token in a conventional written form:\n", - "\n", - "- `tokens { name: \"le\" }` -> `le` \n", - "- `tokens { date { day: \"1\" month: \"juillet\" } } ` -> `1ᵉʳ` \n", - "- `tokens { name: \"il\" }` -> `juillet`\n", - "- `tokens { name: \"il\" }` -> `il` \n", - "- `tokens { name: \"a\" }` -> `a`\n", - "- `tokens { name: \"mangé\" }` -> `mangé` \n", - "- `tokens { cardinal { integer: \"35\" } }` -> `35` \n", - "- `tokens { name: \"pommes\" }` -> `pommes`\n", - "\n", - "and merged into a normalized string:\n", - "\n", - "`le 1ᵉʳ juillet il a mangé 35 pommes`\n", - "\n", - "With the equivalent TN task being the reverse process. " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "_n-5JExAbvwr" - }, - "source": [ - "A few things to note: \n", - "- Each class token has a unique set of field names that must be parsed by the classifier. The default field names for NeMo are chosen to mirror the syntax in [Sparrowhawk](https://github.com/google/sparrowhawk) to enable deployment. 
If these fields are not exact, you will not be able to use Sparrowhawk.\n", - "- NeMo assumes no punctuation (unless explicitly provided in the grammar) and all lower casing to ease integration with upstream ASR.\n", - "- The `name` class token is default for any token that does not require processing. It will be left 'as is.'\n", - "- You may note how the tokenizer performed the conversion of `premier` to `1` while the verbalizer normalized `1` -> `1ᵉʳ`. Such decisions are implementation dependent and will vary depending on preference and language. (That is, normalization from `premier` -> `1ᵉʳ` could have been a tokenization step.)\n", - "- By default, NeMo will create several permutations of key values in a token to ease normalization. That is, given the token `tokens { date { day: \"1\" month: \"juillet\" } }`, it will also produce paths for `tokens { date { month: \"juillet\" day: \"1\" } }`. To prevent this and avoid ambiguity in verbalizer input, tokens can be assigned a `preserve_order` attribute to prevent permutation. (e.g. `tokens { date { day: \"1\" month: \"juillet\" preserve_order: true } }`) (We will discuss this [later in the tutorial](#verbalizer).)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## WFST Classes" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "NeMo Text Processing's base languages currently support only the following semiotic classes to permit integration with Sparrowhawk deployment. \n", - "\n", - "- CARDINAL\n", - "- ORDINAL\n", - "- DECIMAL\n", - "- FRACTION\n", - "- MEASURE\n", - "- MONEY\n", - "- TIME\n", - "- DATE\n", - "- ELECTRONIC\n", - "- TELEPHONE\n", - "- WHITELIST\n", - "- WORD\n", - "- PUNCTUATION\n", - "\n", - "For this tutorial, we will be focusing on the following classes:\n", - "- CARDINAL\n", - "- ORDINAL\n", - "- DECIMAL\n", - "- MONEY\n", - "- TIME\n", - "- WHITELIST\n", - "- WORD\n", - "- PUNCTUATION\n", - "\n", - "While not comprehensive, these classes will provide enough foundation and exposure to edge cases that you will feel comfortable constructing for other cases.\n", - "\n", - "**NOTE**: *If you intend to only develop for personal use with NeMo, you may rename these classes as desired. However, Sparrowhawk integration\n", - "REQUIRES use of only these tags and their assigned attributes. For list of Sparrowhawk tokens and attributes, [consult the Sparrowhawk repository](https://github.com/yzhang123/sparrowhawk/blob/test/src/proto/semiotic_classes.proto)*" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Further Reading" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If you wish to learn more about NeMo Text Processing, you may wish to consult the following:\n", - "- [Y. Zhang, E. Bakhturina, K. Gorman, and B. 
Ginsburg, \"NeMo Inverse Text Normalization: From Development To Production\"](https://arxiv.org/pdf/2104.05055.pdf)\n", - "- [NeMo's Text Normalization Documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nemo_text_processing/intro.html) \n", - "- [NeMo's Text Normalization Deployment Documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/tools/text_processing_deployment.html)\n", - "- NeMo's [Text Normalization Tutorial](https://colab.research.google.com/github/NVIDIA/NeMo/blob/stable/tutorials/text_processing/Text_Normalization.ipynb) or [Inverse Text Normalization](https://colab.research.google.com/github/NVIDIA/NeMo/blob/stable/tutorials/text_processing/Inverse_Text_Normalization.ipynb) tutorials\n", - "- [Sparrowhawk Documentation](https://github.com/google/sparrowhawk)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "For further information regarding WFSTs, please see:\n", - "- [D. Jufasky and J. Martin, *Natural Language Processing*, Ch. 2](https://web.stanford.edu/~jurafsky/slp3/2.pdf)\n", - "- [K. Gorman and R. Sproat, *Finite-State Text Processing*](http://www.morganclaypoolpublishers.com/catalog_Orig/product_info.php?products_id=1636)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "XFdXRcnUfI25" - }, - "source": [ - "# Getting Started \n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "K3Zl3VwqdYqL" - }, - "source": [ - "To begin tokenizer development, make sure you have [installed NeMo from source](https://github.com/NVIDIA/NeMo)." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "rGg7Bf13FXgc" - }, - "source": [ - "For this tutorial, we will focus on developing an Inverse Text Normalization system, such as one you may encounter in downstream ASR processing. As such, we will navigate to\n", - "`nemo_text_processing/inverse_text_normalization` and create a directory for our target language (French) and subdirectories\n", - "for `taggers` and `verbalizers`. You may also wish to create a `data` subdirectory to ease navigation.\n", - "\n", - "(Note, for text normalization, the suggested directory structure would be the same within the `nemo_text_processing/text_normalization` folder. In fact, many of NeMo's grammars actively share between.)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "T58E4pU4FN3A" - }, - "outputs": [], - "source": [ - "LANGUAGE= \"MY_LANGUAGE\" # Change this to your desired language" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "_PAyEPhaFpCD", - "outputId": "23d034d2-8c93-4e8b-e3ce-5ba9e870f82d" - }, - "outputs": [], - "source": [ - "%cd nemo_text_processing/inverse_text_normalization/\n", - "!mkdir {LANGUAGE}\n", - "!mkdir \"{LANGUAGE}/taggers\"\n", - "!mkdir \"{LANGUAGE}/verbalizers\"\n", - "!mkdir \"{LANGUAGE}/data\"\n", - "%cd {LANGUAGE}\n", - "!pwd && ls" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Dependencies" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "O1vfz-bUFpwz" - }, - "source": [ - "All WFSTs deployed in NeMo inherit from the `GraphFst` class.\n", - "While in most cases you can simply import from a pre-existing `graph_utils.py`, you may occasionally find it helpful for deployment to keep a copy \n", - "in your working directory for language specific edits. 
(For our purposes, we will be utilizing `nemo_text_processing.text_normalization.en.graph_utils`, which serves as the default for NeMo's grammars.)\n", - "\n", - "You may also wish to keep a copy of `utils.py` (found in each language system's directory)\n", - "in your working directory to assist with pathing. (Make sure to adjust the imports towards your language.)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "3OME84EmOQ4h", - "outputId": "6eea17f9-aae9-4176-ae35-3d1f0e94b4ea" - }, - "outputs": [], - "source": [ - "!cp ../../text_normalization/en/graph_utils.py .\n", - "!cp ../../text_normalization/en/utils.py .\n", - "%cd ../../.." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "For development, we utilize `nemo_text_processing` and `pynini` (a Python library for efficient WFST construction and traversal, installed with `NeMo-toolkit` by default). " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "While this tutorial will attempt to make the use of `pynini` tools transparent, it does assume some familiarity with its syntax. For a more in-depth guide, the following will provide a functional overview:\n", - "\n", - "- [K. Gorman, Pynini: A Python library for weighted finite-state grammar compilation](https://aclanthology.org/W16-2409.pdf)\n", - "- [K. Gorman, Pynini Tutorial](http://wellformedness.com/courses/pynini/)\n", - "- [Pynini Documentation](https://www.openfst.org/twiki/bin/view/GRM/PyniniDocs) " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We will also import the `pynutil` module for access to some extra functionality, along with writing a simple helper function for printing `pynini` graphs through the previously discussed 'shortest-path' heuristic." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "sz18Ui8-8Kf4" - }, - "outputs": [], - "source": [ - "import pynini\n", - "from pynini.lib import pynutil\n", - "\n", - "def apply_fst(text, fst):\n", - " \"\"\" Given a string input, returns the output string\n", - " produced by traversing the path with lowest weight.\n", - " If no valid path accepts the input string, prints an\n", - " error.\n", - " \"\"\"\n", - " try:\n", - " print(pynini.shortestpath(text @ fst).string())\n", - " except pynini.FstOpError:\n", - " print(f\"Error: No valid output with given input: '{text}'\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Cardinal WFST " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "rOyLZb9DgLoh" - }, - "source": [ - "The vast majority of ITN tasks require the ability to recognize and denormalize numbers. As such, we will begin with developing a Classifier and Verbalizer for Cardinal (integer) numbers. (e.g. `-3,-2,-1,0,1,2,3,4,5....99,100,101...`)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "9GZQkH1V89kh" - }, - "source": [ - "## Grammar" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We will begin by constructing a Cardinal WFST, using French as an example language. While your target language will obviously differ greatly from our example, you will likely find several similarities, such as:\n", - "- Use of a (semi) regular decimal (base-10) counting system. (A common - but not universal - feature of natural languages.)\n", - "- Incorporation of several irregularities requiring contingencies in our WFST construction. (e.g. 
a pseudo-vigesimal (base-20) series.)\n", - "- Use of gender and number agreement in enumeration." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Digits" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "NzJ2DIwc_TT3" - }, - "source": [ - "We shall begin with the first decimal place. As these numbers serve as the building blocks for the rest of our WFST, we will explicitly write out their WFST mappings with `pynini.string_map`:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "u0H4qg4BjYfB" - }, - "outputs": [], - "source": [ - "zero = pynini.string_map([(\"zéro\",\"0\")]) # French only pronounces zeroes as stand-alone\n", - "digits = pynini.string_map([ # pynini function that creates explicit input-output mappings for a WFST\n", - "\t\t\t\t(\"un\",\"1\"),\n", - "\t\t\t\t(\"une\",\"1\"),\n", - "\t\t\t\t(\"deux\",\"2\"),\n", - "\t\t\t\t(\"trois\",\"3\"),\n", - "\t\t\t\t(\"quatre\",\"4\"),\n", - "\t\t\t\t(\"cinq\",\"5\"),\n", - "\t\t\t\t(\"six\",\"6\"),\n", - "\t\t\t\t(\"sept\",\"7\"),\n", - "\t\t\t\t(\"huit\",\"8\"),\n", - "\t\t\t\t(\"neuf\",\"9\")\n", - "])" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "0nHjY-NNjdWQ" - }, - "source": [ - "We may also simply write a `tsv` file in a separate data folder: " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "- zéro\t0\n", - "- un\t1\n", - "- une\t1\n", - "- deux\t2\n", - "- trois\t3\n", - "- quatre\t4\n", - "- cinq\t5\n", - "- six\t6\n", - "- sept\t7\n", - "- huit\t8\n", - "- neuf\t9" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "xicKcZLEzQTg" - }, - "source": [ - "and import with `string_file`" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "`digits = pynini.string_file(\"data/digits.tsv\")`\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "If `utils.py` is in the working directory, you may also use `get_abs_path`, which will always resolve paths relative to your {LANGUAGE} directory:\n", - "\n", - "`from nemo_text_processing.inverse_text_normalization.{LANGUAGE}.utils import get_abs_path`\n", - "\n", - "`digits = pynini.string_file(get_abs_path(\"data/digits.tsv\"))`" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "yPccmicQkYAB" - }, - "source": [ - "While we will use `string_map` throughout this tutorial, please note that NeMo employs the latter option for maintainability and recommends its use instead." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Teens" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "FQJiJcVMrNmC" - }, - "source": [ - "Let us consider our next set of numbers:\n", - "- 10 - dix\n", - "- 11 - onze\n", - "- 12 - douze\n", - "- 13 - treize\n", - "- 14 - quatorze\n", - "- 15 - quinze\n", - "- 16 - seize\n", - "- 17 - dix-sept\n", - "- 18 - dix-huit\n", - "- 19 - dix-neuf\n", - "\n", - "Like before, we can simply use `string_map` to compose a WFST for them. But note how there is some redundancy in the number set: `17`, `18`, and `19` are all of the form `dix + digit`. It would be more efficient to reuse our prior WFST in these cases than to create new arcs, states, and weights. \n", - "\n", - "We can achieve this using pynini's string concatenation function to extend the accepted input strings. First, we will create a WFST for `11-16`." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "orSgBwyXsfY5" - }, - "outputs": [], - "source": [ - "teens = pynini.string_map([\n", - "\t\t\t\t(\"onze\",\"11\"),\n", - "\t\t\t\t(\"douze\",\"12\"),\n", - "\t\t\t\t(\"treize\",\"13\"),\n", - "\t\t\t\t(\"quatorze\",\"14\"),\n", - "\t\t\t\t(\"quinze\",\"15\"),\n", - "\t\t\t\t(\"seize\",\"16\"),\n", - "])" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "s1yIgigdtriQ" - }, - "source": [ - "Now, we will create a `tens` WFST that is responsible for mapping all instances of \"dix\" and concatenate it (accomplished with the overloaded `+` operator) with the prior `digits` WFST. (Deleting any possible hyphens in-between with a small `delete_hyphen` helper.)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "CzwZrFCkt87W" - }, - "outputs": [], - "source": [ - "tens = pynini.string_map([(\"dix\", \"1\")])\n", - "delete_hyphen = pynini.closure(pynutil.delete(\"-\"), 0, 1) # Applies a closure of 0-1 occurrences of the operation. Equivalent to the regex `?` operator\n", - "\n", - "graph_tens = tens + delete_hyphen + digits" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "2knCwybmuTDn" - }, - "source": [ - "We can now combine the `teens` and `graph_tens` WFST together through the union operation (done with the overloaded `|` operator), allowing our choice of either graph." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "WIRJ4PE7uRrl" - }, - "outputs": [], - "source": [ - "graph_tens_and_teens = graph_tens | teens" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "TGkzKoeuxbeA" - }, - "source": [ - "Let's see if it works using our `apply_fst` helper:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "v2iD0_HnxdUV", - "outputId": "1d8f434f-ff8a-4c85-b8d0-1127e4587ddf" - }, - "outputs": [], - "source": [ - "apply_fst(\"dix-huit\", graph_tens_and_teens)\n", - "apply_fst(\"seize\", graph_tens_and_teens)\n", - "apply_fst(\"dix\", graph_tens_and_teens)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Yh2f-3rux8_2" - }, - "source": [ - "The first two worked, but why did we get an error with \"dix\"? If you look back, you'll notice that while our graph has a mapping from \"dix\" to `1`, the concatenation with `digits` makes the assumption that some input from those strings will follow. That is, we left no opportunity for an *omission* of `digits`.\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "OM_eJYlV1UVp" - }, - "source": [ - "![dix_to_digits.png](images/dix_to_digits.PNG)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "M4xCMKRA1Wzw" - }, - "source": [ - "You may also note that this issue would also hold if we wanted to normalize only digits - our graph would error out since it's expecting a `tens` input first. \n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "XJHnlJCm1dPv" - }, - "source": [ - "We can fix both of these problems by allowing an option to simply insert a zero without any extra input. 
(Much like our \"cent\" example.)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "9_vvJ9Bl1dYQ" - }, - "source": [ - "![dix_to_digits_with_insert.png](images/dix_to_digits_with_insert.PNG)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "hJq3uoMN2OcC" - }, - "source": [ - "This may be accomplished through use of the `pynutil.insert` function:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "7h9xuNfA081P" - }, - "outputs": [], - "source": [ - "graph_digits = digits | pynutil.insert(\"0\") # inserts zero if no digit follows" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "fA_L_6Ky2SHm" - }, - "source": [ - "And for `graph_tens`:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "jelVA81o2RXu" - }, - "outputs": [], - "source": [ - "tens = tens | pynutil.insert(\"0\") | tens + delete_hyphen\n", - "graph_tens = tens + graph_digits" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Gb5uhpGr3I4X" - }, - "source": [ - "Bringing everything together:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "bLkDddkA3Stu" - }, - "outputs": [], - "source": [ - "graph_teens_and_tens = graph_tens | teens\n", - "graph_all = graph_teens_and_tens | zero " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "DESDKScv3r3P" - }, - "source": [ - "Let us now check our tests:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "7wrDNXuD3oh9", - "outputId": "661d2526-5aa0-4640-9285-bca15cd56c75" - }, - "outputs": [], - "source": [ - "apply_fst(\"dix-huit\", graph_all) \n", - "apply_fst(\"seize\" , graph_all)\n", - "apply_fst(\"dix\" , graph_all) \n", - "apply_fst(\"une\" , graph_all) \n", - "apply_fst(\"trois\" , graph_all) \n", - "apply_fst(\"quatre\" , graph_all) \n", - "apply_fst(\"zéro\" , graph_all)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Tz_k3NoB66Bv" - }, - "source": [ - "Now we have no more error - albeit at the cost of leading zeroes. (We will take care of this later in the section.)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Tens" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "2dJZAhE57an3" - }, - "source": [ - "Now that we've taken care of the teens, we can proceed with the rest of the tens. Like many languages, French employs a (fairly) regular schema of: `tens_digit + ones_digit` for 20-100. Indeed, we can summarize 20-69 in the following template:\n", - "\n", - "- 20 - vingt\n", - "- 21 - vingt-et-un\n", - "- 22 - vingt-deux\n", - "- 23 - vingt-trois\n", - "- 24 - vingt-quatre\n", - "- 25 - vingt-cinq\n", - "- 26 - vingt-six\n", - "- 27 - vingt-sept\n", - "- 28 - vingt-huit\n", - "- 29 - vingt-neuf\n", - "- 30 - trente\n", - "- 31 - trente-et-un\n", - "- 32 - trente-deux\n", - "- 33 - trente-trois\n", - "...\n", - "- 40 - quarante\n", - "...\n", - "- 50 - cinquante\n", - "...\n", - "- 60 - soixante\n", - "..." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "BuaxVG35UKcs" - }, - "source": [ - "Expanding `tens` is fairly easy to accommodate this template: we simply extend our earlier `string_map` for the new terms in the 'tens place.' 
From there, we once again concatenate the `digits` WFST (along with a simple WFST to delete the \"-et-\" term that occasionally appears.)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "qAnXlRkR32wt" - }, - "outputs": [], - "source": [ - "tens = pynini.string_map([\n", - "\t\t\t\t(\"dix\", \"1\"),\n", - "\t\t\t\t(\"vingt\",\"2\"),\n", - "\t\t\t\t(\"trente\",\"3\"),\n", - "\t\t\t\t(\"quarante\",\"4\"),\n", - "\t\t\t\t(\"cinquante\",\"5\"),\n", - "\t\t\t\t(\"soixante\",\"6\"),\n", - "\t\t])\n", - "\n", - "graph_et = pynutil.delete(\"-et-\")\n", - "\n", - "tens = tens | pynutil.insert(\"0\") | tens + pynutil.delete(\"-\") | tens + graph_et\n", - "\n", - "graph_tens = tens + graph_digits\n", - "graph_teens_and_tens = graph_tens | teens\n", - "graph_all = graph_teens_and_tens | zero " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "-hJwqPDx8I2R" - }, - "source": [ - "#### Special Cases: 70-99" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "zvBLvJdY9XPA" - }, - "source": [ - "However, things get tricky once we go beyond the 60s. Here, standard French possesses a notorious pseudo-vigesimal (base-20) system. For numbers 70-99:\n", - "\n", - "- 70 - soixante-dix <- Literally in English: \"sixty-ten\"\n", - "- 71 - soixante-et-onze <- Literally in English: \"sixty-and-eleven\"\n", - "- 72 - soixante-douze\n", - "- 73 - soixante-treize\n", - "- 74 - soixante-quatorze\n", - "- 75 - soixante-quinze\n", - "- 76 - soixante-seize\n", - "- 77 - soixante-dix-sept\n", - "- 78 - soixante-dix-huit\n", - "- 79 - soixante-dix-neuf\n", - "- 80 - quatre-vingts <- Literally in English: \"four-twenties\"\n", - "- 81 - quatre-vingt-un\n", - "- 82 - quatre-vingt-deux\n", - "- 83 - quatre-vingt-trois\n", - "- 84 - quatre-vingt-quatre\n", - "- 85 - quatre-vingt-cinq\n", - "- 86 - quatre-vingt-six\n", - "- 87 - quatre-vingt-sept\n", - "- 88 - quatre-vingt-huit\n", - "- 89 - quatre-vingt-neuf\n", - "- 90 - quatre-vingt-dix <- Literally in English: \"four-twenties-ten\"\n", - "- 91 - quatre-vingt-onze\n", - "- 92 - quatre-vingt-douze\n", - "- 93 - quatre-vingt-treize\n", - "- 94 - quatre-vingt-quatorze\n", - "- 95 - quatre-vingt-quinze\n", - "- 96 - quatre-vingt-seize\n", - "- 97 - quatre-vingt-dix-sept\n", - "- 98 - quatre-vingt-dix-huit\n", - "- 99 - quatre-vingt-dix-neuf" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "HQNiwFDyVV_3" - }, - "source": [ - "As before, we want to take advantage of as much redundancy as we can without creating additional ambiguities that will impede graph traversal. \n", - "\n", - "We first note that - despite repeating prior words - \"quatre-vingt\" can be mapped to `8` without introducing ambiguity. This is because, despite \"quatre\" and \"vingt\" being present in our prior graphs, our WFST has no pathing for them in this exact order. As such, we can simply add it to `tens` and immediately improve our coverage for 81-89. 
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "AvJqaHhE9Wbd" - }, - "outputs": [], - "source": [ - "tens = pynini.string_map([\n", - "\t\t\t\t(\"dix\", \"1\"),\n", - "\t\t\t\t(\"vingt\",\"2\"),\n", - "\t\t\t\t(\"trente\",\"3\"),\n", - "\t\t\t\t(\"quarante\",\"4\"),\n", - "\t\t\t\t(\"cinquante\",\"5\"),\n", - "\t\t\t\t(\"soixante\",\"6\"),\n", - " (\"quatre-vingt\", \"8\")\n", - "\t\t])\n", - "tens = tens | pynutil.insert(\"0\") | tens + delete_hyphen | tens + graph_et\n", - "graph_tens = tens + graph_digits\n", - "graph_teens_and_tens = graph_tens | teens\n", - "graph_all = graph_teens_and_tens | zero " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "0_DtcpZxZTzX" - }, - "source": [ - "Of course, now we permit the occurrence of:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "V2leANlDhCvj", - "outputId": "db8d5d02-c848-4e50-df23-d8499538281c" - }, - "outputs": [], - "source": [ - "apply_fst(\"quatre-vingt\", graph_all)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "_X_ef3sihCHH" - }, - "source": [ - "which is invalid (French uses the plural \"quatre-vingt**s**\" here.) " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "vgKT903Y6rIQ" - }, - "source": [ - "Should we alter the grammar because of this? Such a decision will largely be dependent on your intended implementation and design aims. If you see the question of 'legal' tokens as a responsibility of your upstream model, then there is no need for any alteration: \"quatre-vingt\" as a standalone token will simply not occur, so there is no input to be concerned with.\n", - "\n", - "However, if your ITN grammars are developed for an environment with low-fidelity ASR and/or where mistaken transcriptions incur heavy loss (e.g. ASR for driving directions, telephone-numbers, banking) then you may wish to err on the side of caution." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Hf_FghLT7jdY" - }, - "source": [ - "If we wanted to go for the latter, we would want to mark that \"quatre-vingts\" maps **only** to `80`. 
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "JliFTF3mZSsJ" - }, - "outputs": [], - "source": [ - "quatre_vingt_plural = pynini.string_map([\n", - " (\"quatre-vingts\", \"80\")\n", - "\t\t])" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "81_b3XPbicT1" - }, - "source": [ - "And that \"quatre vingt\" can only accompany non-zero digits:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "E4_dmg6uin2j" - }, - "outputs": [], - "source": [ - "quatre_vingt_singular = pynini.string_map([\n", - " (\"quatre-vingt-\", \"8\") # Note that the hyphen can be assumed now\n", - "\t\t])\n", - "graph_digits_without_zero = pynini.string_map([\n", - "\t\t\t\t(\"un\",\"1\"),\n", - "\t\t\t\t(\"une\",\"1\"),\n", - "\t\t\t\t(\"deux\",\"2\"),\n", - "\t\t\t\t(\"trois\",\"3\"),\n", - "\t\t\t\t(\"quatre\",\"4\"),\n", - "\t\t\t\t(\"cinq\",\"5\"),\n", - "\t\t\t\t(\"six\",\"6\"),\n", - "\t\t\t\t(\"sept\",\"7\"),\n", - "\t\t\t\t(\"huit\",\"8\"),\n", - "\t\t\t\t(\"neuf\",\"9\")\n", - "])\n", - "graph_eighties = (quatre_vingt_singular + graph_digits_without_zero) | quatre_vingt_plural" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "mL7jpekV8VgP" - }, - "source": [ - "For the `70`'s and `90`'s, we would likewise need to form exclusive configurations for their number series, rewriting digits to recognize \"onze\", \"douze\", \"treize\"... as `1,2,3....` (Note, we'll have to separate `71` and `91` to manage \"soixante-**et**-onze\" vs. \"quatre-vingt-onze\".)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "y3dYkwK29zCX" - }, - "outputs": [], - "source": [ - "seventy_and_ninety = pynini.string_map([\n", - " (\"soixante-dix\", \"70\"),\n", - " (\"quatre-vingt-dix\", \"90\"),\n", - "\t\t])\n", - "\n", - "seventy_and_ninety_tens = pynini.string_map([\n", - " (\"soixante-\", \"7\"),\n", - " (\"quatre-vingt-\", \"9\"),\n", - "\t\t])\n", - "\n", - "seventy_and_ninety_one = pynini.string_map([\n", - " (\"soixante-et-onze\", \"71\"),\n", - " (\"quatre-vingt-onze\", \"91\"),\n", - "\t\t])\n", - "\n", - "seventy_and_ninety_digits = digits = pynini.string_map([ \n", - "\t\t\t\t(\"douze\",\"2\"),\n", - "\t\t\t\t(\"treize\",\"3\"),\n", - "\t\t\t\t(\"quatorze\",\"4\"),\n", - "\t\t\t\t(\"quinze\",\"5\"),\n", - "\t\t\t\t(\"seize\",\"6\"),\n", - "\t\t\t\t(\"dix-sept\",\"7\"), # For 97-99, digits are used as normal.\n", - "\t\t\t\t(\"dix-huit\",\"8\"),\n", - "\t\t\t\t(\"dix-neuf\",\"9\")\n", - "])\n", - "\n", - "graph_seventies_and_nineties = (seventy_and_ninety_tens + seventy_and_ninety_digits) | seventy_and_ninety | seventy_and_ninety_one " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "4NCrCwEH9HVg" - }, - "source": [ - "Now we union them with our original `tens` series:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "psGCgxaH-btn" - }, - "outputs": [], - "source": [ - "tens = pynini.string_map([\n", - "\t\t\t\t(\"dix\", \"1\"),\n", - "\t\t\t\t(\"vingt\",\"2\"),\n", - "\t\t\t\t(\"trente\",\"3\"),\n", - "\t\t\t\t(\"quarante\",\"4\"),\n", - "\t\t\t\t(\"cinquante\",\"5\"),\n", - "\t\t\t\t(\"soixante\",\"6\"),\n", - "\t\t])\n", - "tens = tens | pynutil.insert(\"0\") | tens + delete_hyphen | tens + graph_et\n", - "\n", - "graph_tens = tens + graph_digits\n", - "graph_tens_with_special_cases = graph_tens | graph_seventies_and_nineties | graph_eighties\n", - "graph_teens_and_tens = graph_tens_with_special_cases | teens\n", - "graph_all 
= graph_teens_and_tens | zero " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "xWjSAGRX_s0H" - }, - "source": [ - "Making sure test cases work:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "kapWmgos-xcn", - "outputId": "5e9c6f5c-1450-495f-cadf-2945355b651c" - }, - "outputs": [], - "source": [ - "apply_fst(\"quatre-vingt-treize\" , graph_all)\n", - "apply_fst(\"quatre-vingts\", graph_all)\n", - "apply_fst(\"quatre-vingt-deux\", graph_all)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "hNUepfKZ_vS_" - }, - "source": [ - "And the other cases fail as expected:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "wo2pCOXGAgYn", - "outputId": "0bbe2792-8bc9-40f7-dd28-4745bd1390e3" - }, - "outputs": [], - "source": [ - "apply_fst(\"quatre-vingt\", graph_all)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "4VPuCTTtigh-" - }, - "source": [ - "Of course, there are other ways we could have reconfigured the grammar: we could simply make specific graphs for multiples of ten (`10,20,30..`) and all cases where \"-et-\" occurs (`21,31,41,51...91`). \n", - "\n", - "But this ignores a more important question: was any of this necessary in the first place? All these extra grammars did was simply expand coverage for thirty additional cardinals. And they still didn't exclude all faulty inputs! Note the following cases:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "KICvpeewCFyH", - "outputId": "174dd910-7329-4a5f-a5b0-5e796a174217" - }, - "outputs": [], - "source": [ - "apply_fst(\"dix-une\", graph_all) # supposed to be \"onze\"\n", - "apply_fst(\"dix-deux\", graph_all) # supposed to be \"douze\"\n", - "apply_fst(\"vingt-un\", graph_all) # supposed to be \"vingt-et-un\"\n", - "apply_fst(\"trente-un\", graph_all) # supposed to be \"trente-et-un\"" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "0D130jIVCLp2" - }, - "source": [ - "We *still* need to address possible edge cases!\n", - "\n", - "All of this is to say that knowing your input domain before construction is imperative, as small decisions can easily determine your output range later down the line.\n", - "\n", - "Indeed, if you're particularly concerned with limiting input possibilities, it may be valid simply to write all unique options within a `string_map`. While a tad inelegant, it certainly assists in controlling your outputs." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "RSp9w5ayA9ii" - }, - "outputs": [], - "source": [ - "graph_tens_special = pynini.string_map([\n", - "\t\t\t\t(\"soixante-dix\", \"70\"),\n", - "\t\t\t\t(\"soixante-et-onze\",\"71\"),\n", - " (\"soixante-douze\",\"72\"),\n", - "\t\t\t\t(\"soixante-treize\",\"73\"),\n", - "\t\t\t\t(\"soixante-quatorze\",\"74\"),\n", - "\t\t\t\t(\"soixante-quinze\",\"75\"),\n", - "\t\t\t\t(\"soixante-seize\",\"76\"),\n", - " (\"soixante-dix-sept\",\"77\"),\n", - " (\"soixante-dix-huit\",\"78\"),\n", - "\t\t\t\t(\"soixante-dix-neuf\",\"79\"),\n", - " (\"quatre-vingts\", \"80\"),\n", - " (\"quatre-vingt-un\", \"81\"),\n", - " (\"quatre-vingt-une\", \"81\"),\n", - "\t\t\t\t(\"quatre-vingt-deux\",\"82\"),\n", - " (\"quatre-vingt-trois\",\"83\"),\n", - " (\"quatre-vingt-quatre\",\"84\"),\n", - " (\"quatre-vingt-cinq\",\"85\"),\n", - " (\"quatre-vingt-six\",\"86\"),\n", - " (\"quatre-vingt-sept\",\"87\"),\n", - " (\"quatre-vingt-huit\",\"88\"),\n", - " (\"quatre-vingt-neuf\",\"89\"),\n", - " (\"quatre-vingt-dix\",\"90\"),\n", - " (\"quatre-vingt-onze\",\"91\"),\n", - " (\"quatre-vingt-douze\",\"92\"),\n", - " (\"quatre-vingt-treize\",\"93\"),\n", - " (\"quatre-vingt-quatorze\",\"94\"),\n", - " (\"quatre-vingt-quinze\",\"95\"),\n", - " (\"quatre-vingt-seize\",\"96\"),\n", - " (\"quatre-vingt-dix-sept\",\"97\"),\n", - " (\"quatre-vingt-dix-huit\",\"98\"),\n", - " (\"quatre-vingt-dix-neuf\",\"99\"),])" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "NUPs1qOUg-hE" - }, - "source": [ - "Which is more efficient? Once again, it is dependent on your language and implementation. If we simply compare each graph and its number of states:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "sQ9GsIkNzxsU", - "outputId": "d70ca927-9c43-4f49-846c-c181e725e011" - }, - "outputs": [], - "source": [ - "constructed_version = (graph_seventies_and_nineties | graph_eighties)\n", - "constructed_version.num_states()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "Xsgdu5TYx09_", - "outputId": "5812912f-883b-42e8-afbf-3ec4a0170345" - }, - "outputs": [], - "source": [ - "string_map_version = graph_tens_special\n", - "string_map_version.num_states()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "9jzn_U7s0Sit" - }, - "source": [ - "We see that their number of states (graph vertices) is almost equal. Yet, if we use `pynini`'s `optimize` method - which calls a suite of WFST minimization algorithms: " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "7YtqhOY90iF0", - "outputId": "26f0f51b-b00d-4f5a-9b2f-330c9812666a" - }, - "outputs": [], - "source": [ - "constructed_version.optimize()\n", - "constructed_version.num_states()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "y93SqnOf0qa8", - "outputId": "74efcbfa-a272-4fc6-e36e-f1e31c6df221" - }, - "outputs": [], - "source": [ - "string_map_version.optimize()\n", - "string_map_version.num_states()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "2cTdQj9L0xhl" - }, - "source": [ - "We see that the latter possesses a significantly larger number of graph vertices. 
\n", - "\n", - "So the decision will be dependent on your ITN needs, language, concern with efficiency, and design philosophy. Further, even decisions of language dialect will have an influence. \n", - "(e.g. Belgian, Canadian, and Swiss dialects of French will dispense with elements of the vigecimal system for the decimal schema.)\n", - "\n", - "**N.B.** *For reference: while `nemo_text_processing` grammars aim to minimize invalid productions, they assume input tokens are valid strings for a target language. (e.g. The mapping of \"quatre-vingt\" to `80` is permitted since it is not likely to occur in a valid French string.)* " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "V1djCnvY3CjW" - }, - "source": [ - "For more information on optimization algorithms for WFSTs, please see:\n", - "\n", - "- [M. Mohri,\"Generic epsilon-removal and input epsilon-normalization algorithms for weighted transducers\"](https://cs.nyu.edu/~mohri/pub/ijfcs.pdf)\n", - "- [M. Mohri, \"Weighted automata algorithms\"](https://cs.nyu.edu/~mohri/pub/hwa.pdf)\n", - "- [K. Thompson, \"Programming techniques: regular expression search algorithm\"](http://www.oilshell.org/archive/Thompson-1968.pdf)\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Hundreds\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "dqPUdVBbi6gU" - }, - "source": [ - "\n", - "Moving on to the case of three digit cardinals (\"hundreds\"), it is likely that your chosen language becomes more regular in its schema. For instance, practically all French numbers `>100` obey the following:\n", - "\n", - "- `digit_from_1_to_9 + word_for_hundred + digit_from_1_to_99`\n", - "\n", - "For example:\n", - "- `203` - \"deux-cent-trois\"\n", - "- `530` - \"cinq-cent-trente\"\n", - "- `880` - \"huit-cent-quatre-vingt\"\n", - "\n", - "As such, we can write a simple `hundreds` WFST as:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "lOt-gc-FiF-X" - }, - "outputs": [], - "source": [ - "hundreds = graph_digits + delete_hyphen + pynutil.delete(\"cent\") + delete_hyphen + graph_all" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "Fyn1uL_NoEiz", - "outputId": "d491680b-1b3e-4762-8470-497833b82b0e" - }, - "outputs": [], - "source": [ - "apply_fst(\"deux-cent-trois\", hundreds)\n", - "apply_fst(\"huit-cent-quatre-vingts\", hundreds)\n", - "apply_fst(\"cinq-cent-trente\" , hundreds) " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "qDjq_KfnoD5C" - }, - "source": [ - "Indeed, the use of French only presents two complications:\n", - "- French uses *only* the word \"cent\" for `100`. (Instead of \"un cent\".)\n", - "- 'Pure' multiples of a hundred (`200,300,400....`) use the plural \"cents\".\n", - "\n", - "The second one is the easier of the two so let's start there. There are actually two options open to us. First, we could treat \"cents\" the same way as we did \"cent\" in the base case and simply delete it. From there, the lack of any following inputs will allow the WFST to insert the trailing zeroes as appropriate." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "m2F-sumbxqLE" - }, - "outputs": [], - "source": [ - "cents = pynini.accep(\"cent\") | pynini.accep(\"cents\") # Creates a Finite State (Accep)tor, mapping inputs back to themselves\n", - "hundreds = graph_digits + delete_hyphen + pynutil.delete(cents) + delete_hyphen + graph_all" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "VisQu_Etx-QB" - }, - "source": [ - "Or we can use it as a cue to 'shortcut' the WFST to immediately insert zeroes." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "VspiTN5Vxxjl" - }, - "outputs": [], - "source": [ - "graph_cents = pynini.cross(\"cents\", \"00\") # Creates a single input-output mapping\n", - "hundreds = graph_digits + delete_hyphen + ((pynutil.delete(\"cent\") + delete_hyphen + graph_all) | graph_cents)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "meVn5BiyyX5v" - }, - "source": [ - "For the case of solitary \"cent\", we need to make sure our output is `1` only in the case that no digit precedes the occurrence. Here we need to be confident in the structure of our WFST and that any possible ambiguity has been dealt with by this point. (Something to keep in mind as we move to the thousands.)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "277Z-zLWyWAf" - }, - "outputs": [], - "source": [ - "graph_cent = pynini.cross(\"cent\", \"1\")\n", - "graph_hundreds_first_digit = (graph_digits + delete_hyphen + pynutil.delete(cents)) | graph_cent\n", - "graph_hundreds = graph_hundreds_first_digit + delete_hyphen + graph_all" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "FNZlJsvS_Yvt", - "outputId": "e85ae561-e7a1-4b6a-e394-f0194fdb89e7" - }, - "outputs": [], - "source": [ - "apply_fst(\"trois-cents\", graph_hundreds) \n", - "apply_fst(\"cent\", graph_hundreds)\n", - "apply_fst(\"cent-trois\", graph_hundreds) " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Thousands" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "e7Dy5slLzp-K" - }, - "source": [ - "For quite a few languages, managing the WFST for the thousands place is the last aspect to figure out, as the higher powers of ten reuse the same schema. (For those working with counting systems that reserve special terms for \"ten-thousand\" (e.g. Chinese derived counting systems), you may need to extend unique coverage to the next power of ten.)\n", - "\n", - "For French, the question of thousands is rather simple: `digits_from_1_to_999 + mille + digits_from_1_to_999`\n", - "\n", - "With only the exception that any expression of one thousand drops a leading digit. 
\n", - "- `1,000` -> \"mille\"\n", - "- `1,001` -> \"mille-un\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "AvsnAAiPzlu_" - }, - "outputs": [], - "source": [ - "graph_one_thousand = pynini.cross(\"mille\", \"1\")\n", - "graph_many_thousand = graph_hundreds + delete_hyphen + pynutil.delete(\"mille\")\n", - "\n", - "graph_thousands = (graph_one_thousand | graph_many_thousand) + delete_hyphen + graph_hundreds" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "i3m9TG7Y4tkl", - "outputId": "d3f1f81d-c463-4934-9df7-3b8f2b67798f" - }, - "outputs": [], - "source": [ - "apply_fst(\"cent-mille-deux-cents\", graph_thousands)\n", - "apply_fst(\"deux-cent-mille-deux-cents\", graph_thousands)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "NoevSTZGGT17" - }, - "source": [ - "### Weighting" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "A2gcVIZM0-iv" - }, - "source": [ - "Question: will this cover all our grammar so far? (Hint: what assumptions were made about \"cent\"/\"cents\"?)\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "cCFtPhr1BjAc", - "outputId": "048e0d93-a4a8-4f4e-d461-bfd70e911aff" - }, - "outputs": [], - "source": [ - "apply_fst(\"deux-mille-un\", graph_thousands)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Ne-7L9Cd4t-8" - }, - "source": [ - "Once again, we need to introduce the possibility of the prior power of ten not occurring in the string. There must be an option for simply inserting a string of `0` in place of the omitted \"cent\"." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "iockqXdn-aG4" - }, - "source": [ - "Further, we want to be careful with how cavalier we have been with insertions. Consider the following:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "bxJlSnj2-Xw3", - "outputId": "6722e5ef-8a7f-43e1-84fe-b3f5f18307e1" - }, - "outputs": [], - "source": [ - "apply_fst(\"mille-cent-un\", graph_thousands) # Should be 1101\n", - "apply_fst(\"mille-cent\", graph_thousands) # 1100" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "fq5zEayA-kOx" - }, - "source": [ - "It appears that our WFST has developed a tendency to simply 'ignore' some of these higher powers. Let us return to our code for `graph_hundreds` and `graph_thousands`. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "S2aV1KQ4-1iP" - }, - "outputs": [], - "source": [ - "graph_cents = pynini.cross(\"cents\", \"00\")\n", - "graph_cent = pynini.cross(\"cent\", \"1\")\n", - "graph_hundreds_first_digit = (graph_digits + delete_hyphen + pynutil.delete(cents)) | graph_cent\n", - "graph_hundreds = (graph_hundreds_first_digit + delete_hyphen | pynutil.insert(\"0\")) + graph_all \n", - "\n", - "graph_one_thousand = pynini.cross(\"mille\", \"1\")\n", - "graph_many_thousand = graph_hundreds + delete_hyphen + pynutil.delete(\"mille\")\n", - "graph_thousands = (graph_one_thousand | graph_many_thousand) + delete_hyphen + graph_hundreds" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "9avwOIkk-9qt" - }, - "source": [ - "Recall that throughout we have provided options for simply inserting zeroes in the case of omitted numbers? 
That tendency has finally caught up with us. The use of our previous `graph_hundreds` in `graph_many_thousands` now allows our graph to insert a string of `0`'s without penalty. \n", - "\n", - "You may note that this is very similar to the \"cents\" example brought up at the beginning, presenting a similar solution. We can control this output by making it too costly to traverse unless absolutely necessary for the graph. This can be accomplished simply by appending a weight to the insertion for hundreds:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "MQG3j0U8CUAQ" - }, - "outputs": [], - "source": [ - "graph_hundreds = (graph_hundreds_first_digit + delete_hyphen | pynutil.insert(\"0\", weight=.1)) + graph_all \n", - "\n", - "graph_one_thousand = pynini.cross(\"mille\", \"1\")\n", - "graph_many_thousand = graph_hundreds + delete_hyphen + pynutil.delete(\"mille\")\n", - "graph_thousands = (graph_one_thousand | graph_many_thousand) + delete_hyphen + graph_hundreds" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "KNHhrYZ7Ca58", - "outputId": "a7d07372-733d-4837-c1e9-1dc58ba2b87c" - }, - "outputs": [], - "source": [ - "apply_fst(\"mille-cent-un\", graph_thousands)\n", - "apply_fst(\"mille-cent\", graph_thousands)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "51yPEaf2EkbD" - }, - "source": [ - "Why choose a weight of `.1`? Quite simply: it's arbitrary. As mentioned earlier, the default graph in `pynini` is a tropical semiring, which uses the `min` function to select among two arcs for path traversal. Since all our paths so far are weight `0`, any positive value will ensure that it is a last option among path traversal. (Note, this conversely entails any negative weight path will be prioritized.)\n", - "\n", - "That we chose this number as a small value comes from a place of caution: the tropical semiring uses an additive function to calculate the total weight of an entire path to traverse a WFST. As our grammars can easily become massive, this means that small weights can have major impact down the line. Further, by constraining path weights to small values, we can have general certainty towards the maximum weight of any individual graph, allowing us to add constraints regarding maximum token length and token hierarchy. (As explained in [later sections](#classifyweights).) As such, when using weights in a localized setting, it is best to use small values to avoid unforeseen escalation. " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "iScKgvRxGt-B" - }, - "source": [ - "### Higher Powers\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "rtHEd6OE2WSg" - }, - "source": [ - "At this point, we can propose a general heuristic with escalating to higher powers of ten: they always need a way for their absence to be accommodated in the WFST. Further, they require some weighting to prevent this absence from developing into a string of omitted values. To avoid further bumps, we'll take care of this now with `graph_thousands`." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "iZMN7wcE2lH5" - }, - "outputs": [], - "source": [ - "graph_one_thousand = pynini.cross(\"mille\", \"1\")\n", - "graph_many_thousand = graph_hundreds + delete_hyphen + pynutil.delete(\"mille\")\n", - "graph_thousands = (graph_one_thousand | graph_many_thousand | pynutil.insert(\"000\", weight=.001)) + delete_hyphen + graph_hundreds" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Fkc3LIH824P7" - }, - "source": [ - "\n", - "For the rest of French (and many other languages), the rest of the work is simply repeating the prior pattern for the thousands element: \n", - "`hundreds + word_for_higher_power + hundreds.....` Of course there will be some variation in this schema, but the recursion should be regular. (It is rather rare that languages appropriate unique forms for these higher counts.) " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "qGnK4ARX4Nay" - }, - "source": [ - "To finish French, we can list off the following equivalent for higher powers of ten:\n", - "- `million` - \"million/millions\" \n", - "- `billion` - \"milliard/milliards\"\n", - "- `trillion` - \"billion/billions\"\n", - "\n", - "Like the \"cent/cents\" rule, these values alternate with a plural form in the case of multiples of the value. Writing them out:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "sBu7-dub4vxz" - }, - "outputs": [], - "source": [ - "millions = pynini.accep(\"million\") | pynini.accep(\"millions\")\n", - "graph_millions = ((graph_hundreds + delete_hyphen + pynutil.delete(millions) + delete_hyphen) | pynutil.insert(\"000\", weight=.1) # We need three zeroes now\n", - " ) + graph_thousands" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "LmMeCHXr5Bb5" - }, - "outputs": [], - "source": [ - "billions = pynini.accep(\"milliards\") | pynini.accep(\"milliard\")\n", - "graph_billions = ((graph_hundreds + delete_hyphen + pynutil.delete(billions) + delete_hyphen)| pynutil.insert(\"000\",weight=.1) # We need three zeroes now\n", - " ) + graph_millions" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "CIRIeQEg5B0J" - }, - "outputs": [], - "source": [ - "trillions = pynini.accep(\"billion\") | pynini.accep(\"billions\")\n", - "graph_trillions = ((graph_hundreds + delete_hyphen + pynutil.delete(trillions) + delete_hyphen) | pynutil.insert(\"000\",weight=.1) # We need three zeroes now\n", - " ) + graph_billions" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "sRNUPx-15J1v" - }, - "source": [ - "Bringing all together:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "0dLOWm_B5SwQ" - }, - "outputs": [], - "source": [ - "graph = graph_trillions | zero" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "nBFE3BrN6IPR" - }, - "source": [ - "Let's try it out:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "6lWwtR1S6LI4", - "outputId": "3a6740ee-9e92-4500-c2c8-965131167e58" - }, - "outputs": [], - "source": [ - "example = \"deux-cent-milliard-quatre-million-deux-cent-quatre-vingt-onze\"\n", - "apply_fst(example, graph) " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Finishing Touches" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "-w3KgX6C6mff" - }, - "source": [ - "Now 
that we have our cardinal in place, we can take care of that stylistic issue of the leading zeroes. For this, we want to develop a 'filter' that deletes all zeroes preceding the first non-zero in the string, and leaves the rest 'as is.'\n", - "\n", - "First, let us create the filter by calling on `NEMO_DIGIT` - a `graph_utils` WFST that only permits digits as input. With it, we'll create a WFST that will delete all leading zeroes in a string. We then compose this (using `@`) onto our original graph, creating a new graph that accepts inputs from our original but produces only the outputs of `clean_cardinal`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 290 - }, - "id": "EA4VnRe6FO-2", - "outputId": "59e412b3-a445-4172-ee64-b0f80281a167" - }, - "outputs": [], - "source": [ - "from graph_utils import NEMO_DIGIT # from the local copy of graph_utils.py made earlier\n", - "\n", - "delete_leading_zeroes = pynutil.delete(pynini.closure(\"0\")) # will delete all zeroes under closure. Equivalent to regex * operator\n", - "stop_at_non_zero = pynini.difference(NEMO_DIGIT, \"0\") # creates a graph that accepts all input-outputs from NEMO_DIGIT except 0\n", - "rest_of_cardinal = pynini.closure(NEMO_DIGIT) # accepts all digits that may follow\n", - "\n", - "clean_cardinal = delete_leading_zeroes + stop_at_non_zero + rest_of_cardinal\n", - "clean_cardinal = clean_cardinal | \"0\" # We don't want to ignore the occurrence of zero\n", - "\n", - "graph = graph @ clean_cardinal " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "piP9nqQkHpo3" - }, - "source": [ - "Now our WFST will output our numbers as normal:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "dnQ9odSpIAB7" - }, - "outputs": [], - "source": [ - "apply_fst(example, graph)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Final Notes\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "p7zt8lVsK2rY" - }, - "source": [ - "We have finally formulated a grammar that will process French cardinals into numeric representation. Of course, not every grammar you write will be for French. But several of the principles we've worked through will be invaluable in your own development. Before moving on, here's a quick summary of (almost) universal points to take away for WFST construction.\n", - "- Decide at the beginning of construction the level of constraint you wish for your grammar. Is it necessary to have a specific domain or can you rely on upstream models to narrow your input possibilities for you? \n", - "- Work iteratively upwards from the smallest place value of your numeric system. This will assist you in forming building blocks for larger values. \n", - "- Always allow for the possibility of omission of previous place values. (Not every number in the thousands will contain mention of the hundreds place.)\n", - "- For each place value, consider how the sub-grammar will affect the preceding and following place values. Are there exceptions that you've built into the grammar that may become problematic later on?\n", - "- Utilize weights for default insertions to limit path traversal to only final options. When doing so, use small values to avoid escalating problems in your larger grammar." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "nvyHg1bQIIHD" - }, - "source": [ - "With that handled, we can move on to converting this grammar into a Classifier." 
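Before doing so, it is worth running a quick sanity sweep over the finished graph (a sketch using the `apply_fst` helper from earlier; the expected values follow from the sections above, and the test list should be extended for your own language):

```python
# Spot-check the finished cardinal graph against cases worked through above.
for spoken in [
    "zéro",              # expect 0
    "dix-huit",          # expect 18
    "soixante-et-onze",  # expect 71
    "quatre-vingts",     # expect 80
    "deux-cent-trois",   # expect 203
    "deux-mille-un",     # expect 2001
]:
    apply_fst(spoken, graph)
```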
- ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "gJ1YJUvhIZwm" - }, - "source": [ - "## Classifier" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "q2L2x0crIeXQ" - }, - "source": [ - "Now that we have a grammar that will convert individual tokens into number strings, we want to focus on building it into a classifier to properly tag candidate tokens. This requires a couple of properties:\n", - "- It recognizes any valid token and permits traversal through the WFST graph\n", - "- Conversely, it does not allow invalid tokens to traverse the WFST graph\n", - "- It properly disambiguates overlap among ambiguous cases\n", - "- It assigns the proper attributes to a classified token\n", - "\n", - "While this seems like a lot, in practice this just means that your grammar will need a few more tweaks to improve exclusivity." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ArEYn7RWKcYI" - }, - "source": [ - "NeMo ITN performs token classification through a series of `GraphFst` classes and assumes deployment of your grammars through an object that inherits from this class. As such, you will need to instantiate your grammar as a `CardinalFst`: " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 368 - }, - "id": "GWgMSybqLqiS", - "outputId": "597c00ae-0f62-417f-888c-88c81c24a3fc" - }, - "outputs": [], - "source": [ - "class CardinalFst(GraphFst):\n", - " def __init__(self):\n", - " super().__init__(name=\"cardinal\", kind=\"classify\")\n", - " # Rest of the grammar here\n", - " # ....... \n", - " #........." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "SIE8dNQlL52G" - }, - "source": [ - "While the naming convention may vary, the `name` and `kind` properties must be set accordingly to permit Sparrowhawk integration.\n", - "\n", - "Further, the resulting graph must produce the classified token within the following format:\n", - "`tokens { cardinal { integer: \"DIGIT_STRING\" } }`\n", - "\n", - "This is accomplished by a series of string insertions:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "aC_c64KSNTCg" - }, - "outputs": [], - "source": [ - "class CardinalFst(GraphFst):\n", - " def __init__(self):\n", - " super().__init__(name=\"cardinal\", kind=\"classify\")\n", - " # Rest of the grammar here\n", - " # ....... \n", - " #.........\n", - " final_graph = pynutil.insert(\"integer: \\\"\") + graph + pynutil.insert(\"\\\"\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "AGLQxOSzOK1F" - }, - "source": [ - "Followed by a call of the parent `GraphFst.add_tokens()` method:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "Jz-UXFipORps" - }, - "outputs": [], - "source": [ - "class CardinalFst(GraphFst):\n", - " def __init__(self):\n", - " super().__init__(name=\"cardinal\", kind=\"classify\")\n", - " # Rest of the grammar here\n", - " # ....... \n", - " #.........\n", - " final_graph = pynutil.insert(\"integer: \\\"\") + graph + pynutil.insert(\"\\\"\")\n", - " final_graph = self.add_tokens(final_graph)\n", - " self.fst = final_graph" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "gh23S7BHOY0r" - }, - "source": [ - "Which will insert the appropriate formatting. 
Note that this formatting must be exact: a single space must follow each field name and each value must be within escaped double quotes.\n", - "\n", - "In the event that you also wish for `CardinalFst` to indicate negative values, the optional `negative: ` property may be used.\n", - "\n", - "For instance, French indicates negative values by prefacing the quantity with \"moins.\" As such:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "3JbTn35cOx0k" - }, - "outputs": [], - "source": [ - "optional_minus_graph = pynini.closure(\n", - " pynutil.insert(\"negative: \") + pynini.cross(\"moins\", \"\\\"-\\\"\") + \" \", 0, 1 # Note the extra space to separate the value from the integer field\n", - ")\n", - "\n", - "final_graph = optional_minus_graph + pynutil.insert(\"integer: \\\"\") + graph + pynutil.insert(\"\\\"\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "DCs1048v6N0K" - }, - "source": [ - "All together, your `CardinalFst` ultimately serves as a wrapper for your grammar, save for the addition of a few insertions to assist processing:\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "eo6uEz1s5TJY" - }, - "outputs": [], - "source": [ - "class CardinalFst(GraphFst):\n", - " def __init__(self):\n", - " super().__init__(name=\"cardinal\", kind=\"classify\")\n", - " \n", - " ### Cardinal Grammar....\n", - " ### .....\n", - " graph = graph_trillions | zero \n", - "\n", - " ### Formatting grammar....\n", - " ### .....\n", - " graph = graph @ clean_cardinal\n", - "\n", - " ### Token insertion\n", - " optional_minus_graph = pynini.closure(\n", - " pynutil.insert(\"negative: \") + pynini.cross(\"moins\", \"\\\"-\\\"\") + \" \", 0, 1\n", - " )\n", - "\n", - " final_graph = optional_minus_graph + pynutil.insert(\"integer: \\\"\") + graph + pynutil.insert(\"\\\"\")\n", - "\n", - " final_graph = self.add_tokens(final_graph) # inserts the cardinal tag\n", - "\n", - " self.fst = final_graph" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "MFIMdLCoZzLK" - }, - "source": [ - "Let's see a demonstration. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "4CF6Iz9NZ7R_" - }, - "outputs": [], - "source": [ - "cardinal = CardinalFst().fst\n", - "\n", - "example = \"moins deux-cent-quatre\"\n", - "\n", - "apply_fst(example, cardinal)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Verbalizer" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "uvUqpC_Q8FSt" - }, - "source": [ - "The verbalizer can be both the most crucial and the simplest part of building each grammar. On one hand, it is the component that finalizes all of your previous work. If it is unable to properly normalize your text, everything has been for naught.\n", - "\n", - "On the other hand, your previous work has vastly limited the unpredictability of your input. Recall from our initial demonstration of the classifier-verbalizer system that an input like \"le premier juillet il a mangé trente-cinq pommes\" becomes:\n", - "\n", - "- `tokens { name: \"le\" }`\n", - "- `tokens { date { day: \"1\" month: \"juillet\" } }` \n", - "- `tokens { name: \"il\" }` \n", - "- `tokens { name: \"a\" }` \n", - "- `tokens { name: \"mangé\" }`\n", - "- `tokens { cardinal { integer: \"35\" } }` \n", - "- `tokens { name: \"pommes\" }`\n", - "\n", - "Part of the purpose of the two-stage set-up is that the input space for each verbalizer is obvious: it's simply the name of its semiotic class. 
As such, we only need to write our grammar to recognize its class, remove tokens accordingly, and then manage the attributes of each semiotic token." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "et1GgmBuAWzY" - }, - "source": [ - "We will begin as we did with our classifier and create a class to inherit from the `GraphFst` utility class:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "NNKpgWtkAgEW" - }, - "outputs": [], - "source": [ - "class CardinalFst(GraphFst):\n", - " def __init__(self):\n", - " super().__init__(name=\"cardinal\", kind=\"verbalize\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "OyAV39NsAqSN" - }, - "source": [ - "One of the useful aspects of the `GraphFst` utility is that it already possesses a built-in graph that will recognize and remove semiotic tokens: `delete_tokens`. As such, we need only concern ourselves with managing the properties of the Cardinal class:\n", - "- `integer`\n", - "- `negative`\n", - "\n", - "Here, the desired written format of your chosen language will dictate how you proceed. For French, we have the following rules for Cardinal numbers:\n", - "- A negative sign is written before the numeral.\n", - "- Cardinal numbers representing quantities (e.g. \"mille euros\"/\"one thousand dollars\") are written with spaces in-between every three digits. (e.g. `1 000`)\n", - "- Cardinal numbers representing place in a sequence or addresses (\"page mille\"/\"page one thousand\") are written without spacing. (`1000`)\n", - "\n", - "The first property seems easy enough to handle: write a grammar that simply removes the `negative` formatting, leaving only `-`. (Recall that our Classifier only inserted the string if it was present.) \n", - "\n", - "For the final two, we may note that the WFSTs we intend to develop for the Decimal, Measure, and Money classes will already cover most desired quantities. As such, we can leave the issue of spacing to those instances and let the Cardinal WFST default to the non-spacing case. (Note that this will be helpful with Time, Date, Telephone, Electronic, and Ordinal classes as they will not use the spacing format either. It is usually better to reserve specific formatting rules to other classes and let the Cardinal serve as a default.)\n", - "\n", - "As such, we just need our WFST to remove the `integer` property and `negative` property (if it occurs). 
These can be managed through the `pynutil.delete` function, as seen in the following:\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 368 - }, - "id": "6MF2I6SLU7nf", - "outputId": "0437c4af-5c96-4122-8af0-ca37723c7228" - }, - "outputs": [], - "source": [ - "class CardinalFst(GraphFst):\n", - " def __init__(self):\n", - " super().__init__(name=\"cardinal\", kind=\"verbalize\")\n", - " \n", - " # Removes the negative attribute and leaves the sign if it occurs\n", - " optional_sign = pynini.closure(\n", - " pynutil.delete(\"negative:\")\n", - " + delete_space\n", - " + pynutil.delete(\"\\\"\")\n", - " + pynini.accep(\"-\")\n", - " + pynutil.delete(\"\\\"\")\n", - " + delete_space,\n", - " 0,\n", - " 1,\n", - " )\n", - " \n", - " # removes integer aspect\n", - " graph = (\n", - " pynutil.delete(\"integer:\")\n", - " + delete_space\n", - " + pynutil.delete(\"\\\"\")\n", - " + pynini.closure(NEMO_DIGIT, 1) # Accepts at least one digit\n", - " + pynutil.delete(\"\\\"\")\n", - " )\n", - " \n", - " graph = optional_sign + graph # concatenates two properties\n", - "\n", - " delete_tokens = self.delete_tokens(graph) # removes semiotic class tag\n", - "\n", - " self.fst = delete_tokens.optimize()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "QSX2KlZJbRAA" - }, - "source": [ - "Let's see if it will properly render a given token:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "JxaLm2k0bYIJ" - }, - "outputs": [], - "source": [ - "cardinal = CardinalFst().fst\n", - "example = 'cardinal { negative: \"-\" integer: \"204\" }'\n", - "\n", - "apply_fst(example, cardinal)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Bc0-QCBHWg-8" - }, - "source": [ - "That's it! We've now completed all aspects of our `CardinalFst` from grammar writing to Verbalization. While we still have quite a few semiotic classes left, you will find that they build off the `CardinalFst` quite easily, making progress much simpler and more straightforward.\n", - "\n", - "Before proceeding, there are a few things to note:\n", - "- `delete_tokens` is called on the completed graph, despite the token class occurring first in the tokenized string. This is because the function composes the graph with an initial WFST that deletes the tags. As such, the function must be passed a completed graph.\n", - "- In our initial example, all tokens were enclosed within a `token` category. Insertion and deletion of this category is managed by the main [Classifier](#tokenize-and-classify) and [Verbalizer](#verbalize-and-verbalize-final) respectively and is not a concern during individual class grammar development.\n", - "- Earlier in the tutorial we noted that NeMo ITN permutes all WFSTs unless the `preserve_order` tag is passed as part of the Classifier. This allows you to ignore possible variation in designing the verbalizer and focus on whatever form of processing is easiest for the grammar. That is, the decision to process the `negative` property before the `integer` property is not a consequence of the French language, but simply easier to write out with `pynini`. \n", - "- Conversely, if your language is completely invariant in this regard, it may be more efficient to pass `preserve_order` through the Classifier and manage the property here in the Verbalizer. 
This allows NeMo ITN to avoid building states and arcs for each permutation, reducing graph size and compilation time." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "aFUrbSdJ8Wk7" - }, - "source": [ - "# Ordinal WFST " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "w1b0Z7f5Z9Ar" - }, - "source": [ - "Ordinals are the class of numbers used for enumerating order or placement of entities in a series. In some languages, they are simply derivations of cardinal numbers. For instance, English enumerates order as `first, second, third, fourth, fifth....` After the third ordinal, they follow a regular pattern of `cardinal + 'th'`.\n", - "\n", - "Meanwhile, other languages may reserve specific counting systems for ordinals. For example, while Korean uses a Chinese-derived counting system for several Cardinal-related tasks, it uses derivations from a native counting system for ordering:\n", - "\n", - "**Cardinal**/**Ordinal** = **English**\n", - "- il/cheot-jae = \"First\"\n", - "- i/dul-jae = \"Second\"\n", - "- sam/set-jae = \"Third\"\n", - "- sa/net-jae = \"Fourth\"\n", - "- o/daseot-jae = \"Fifth\"\n", - "\n", - "If your language is of the latter variety, you will likely need to begin development of your Ordinal WFST by repeating Cardinal WFST development before proceeding. (Or make it part of your previous Cardinal WFST and combine the two with a `union` operation.) While you can extend coverage to the level of the Cardinal WFST, you will find most Ordinals to be sufficiently covered by only enumerating to a few hundreds. (e.g. Is it common in your language to speak of the \"one millionth\" in an order and/or write out `1,000,000th`?)\n", - "\n", - "For this portion of the tutorial, we will focus on the first type of ordinals - those primarily derived by altering Cardinals." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "oq_xA8NPiANw" - }, - "source": [ - "## Grammar" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "lhjcQS6oiD_w" - }, - "source": [ - "Continuing with our example language, we first begin by laying out our expected inputs and pinpointing a regular pattern to guide our WFSTs. We note the following examples:\n", - "\n", - " **English = French**\n", - " - \"first\" = \"premier/première\"\n", - " - \"second\" = \"second/seconde/deuxième\"\n", - " - \"third\" = \"troisième\"\n", - " - \"fourth\" = \"quatrième\"\n", - " - \"fifth\" = \"cinquième\"\n", - " - \"sixth\" = \"sixième\"\n", - " - \"seventh\" = \"septième\"\n", - "\n", - "From our example inputs, it appears that the spelling of French Ordinals follows a general format of: `cardinal + ième`. The only exceptions appear to be in the case of the first and second Ordinals - for which completely different roots appear - and the fourth and the fifth Ordinals - where the former drops the \"e\" at the end of the root (`quatre -> quatr`) and the latter appends a \"u\" (`cinq -> cinqu`). \n", - "\n", - "For the expected outputs, we observe the following examples:\n", - " - \"premier/première\" -> `1ᵉʳ/1ʳᵉ`\n", - " - \"second/seconde\" -> `2ᵈ/2ᵈᵉ`\n", - " - \"deuxième\" -> `2ᵉ`\n", - " - \"troisième\" -> `3ᵉ`\n", - " - \"quatrième\" -> `4ᵉ`\n", - " - \"cinquième\" -> `5ᵉ`\n", - " - \"sixième\" -> `6ᵉ`\n", - " - \"septième\" -> `7ᵉ`\n", - "\n", - "It appears that the output is simply the cardinal number of the root with an associated superscript. 
Since we have already constructed the Cardinal WFST, this means that the job of constructing an Ordinal WFST is simply a case of recognizing the cardinal root for the input and then utilizing a preconstructed Cardinal grammar to render the proper form alongside an associated superscript. That is, our tasks are to:\n", - "- Identify the proper superscript for the ordinal\n", - "- Change the ordinal back into a cardinal\n", - "- Use the Cardinal WFST to transform the cardinal into normalized form\n", - "- Properly render the ordinal using the normalized cardinal and proper superscript\n", - "\n", - "As information regarding the superscript will need to be conveyed through development of the Classifier, we will begin with creating the grammar necessary for rendering the ordinal as its cardinal root. \n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "AOUVZhiwT7hE" - }, - "source": [ - "### Stripping Suffixes" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "5nw0_lOTsEik" - }, - "source": [ - "Since French forms Ordinals by appending a suffix to Cardinals, we should start by creating a WFST to remove the suffix. Assuming that our grammar processes one token at a time, this means that we just need a WFST that will accept all tokens that end with \"ième\" and then delete the suffix from that token:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "Rk89LhsxsHTO" - }, - "outputs": [], - "source": [ - "strip_morpheme = pynutil.delete(\"ième\") # deletes suffix\n", - "graph_strip_morpheme = NEMO_SIGMA + strip_morpheme # accepts any string ending with the suffix, then deletes the suffix" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "pLg-PzdntV4N" - }, - "source": [ - "Now we can create a graph that permits all characters in a word token and deletes the ordinal suffix. (Note that this also means that the graph won't accept tokens without the suffix, helping us avoid false inputs.) \n", - "\n", - "We can now compose this graph with our Cardinal WFST to strip the suffixes from ordinals and treat them as cardinals. However, recall that our `CardinalFst` also inserted its own class tag. Obviously, we do not want to do this here as it will disrupt the formatting of the token. Instead, we should create a new subgraph *within* the `CardinalFst` class that will only produce the cardinals without tokens."
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "class CardinalFst(GraphFst):\n", - " def __init__(self):\n", - " super().__init__(name=\"cardinal\", kind=\"classify\")\n", - " \n", - " ### Cardinal Grammar....\n", - " ### .....\n", - " graph = graph_trillions | zero \n", - "\n", - " ### Formatting grammar....\n", - " ### .....\n", - " graph = graph @ clean_cardinal\n", - " \n", - " ### NEW GRAPH\n", - " self.just_cardinals = graph # will produce cardinals without formatting\n", - "\n", - " ### Token insertion\n", - " optional_minus_graph = pynini.closure(\n", - " pynutil.insert(\"negative: \") + pynini.cross(\"moins\", \"\\\"-\\\"\") + \" \", 0, 1\n", - " )\n", - "\n", - " final_graph = optional_minus_graph + pynutil.insert(\"integer: \\\"\") + graph + pynutil.insert(\"\\\"\")\n", - "\n", - " final_graph = self.add_tokens(final_graph)\n", - "\n", - " self.fst = final_graph" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we call it for our graph:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "vxDgBa4_t1nD" - }, - "outputs": [], - "source": [ - "graph_cardinal = CardinalFst().just_cardinals \n", - "graph_ordinal_regular_suffix = graph_strip_morpheme @ graph_cardinal" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "hSpk5M7BuXRz" - }, - "source": [ - "Let's see if it works and gives us the desired cardinal:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "7cJ7fieouY2r" - }, - "outputs": [], - "source": [ - "example = \"sixième\" # derived from six/6\n", - "apply_fst(example, graph_ordinal_regular_suffix)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "GtEuV7sOuxek" - }, - "source": [ - "Now we can consider the edge cases. Beyond the first and second ordinals, French exhibits irregular behavior in the following cases:\n", - "- If the cardinal root ends with an \"e\", the \"e\" is dropped before adding the suffix (e.g. \"quatrième\"). \n", - "- Cardinals ending with \"cinq\", \"neuf\", and \"dix\" change their endings to \"cinqu\", \"neuv\", and \"diz\" before appending the suffix, respectively. \n", - "\n", - "We could start by proposing a WFST that replaces the suffix \"ième\" with \"e\" and then compose this onto the Cardinal WFST. If it is a legitimate cardinal, then there will be a path through the Cardinal WFST and the integer will be rendered as normal. \n", - "\n", - "Meanwhile, the case of \"dix\", \"cinq\", and \"neuf\" would each require a distinct WFST as they are each a consequence of different rules of orthography and phonology. Like the case with \"e\", we could change each back to its root and then see if the Cardinal WFST will permit a path with the new input. 
\n", - "\n", - "It is at this point that we can do a cost-benefit analysis and realize that all these cases can be managed by an explicit `string_map/string_file`:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "_9KTNQeIw4sq" - }, - "outputs": [], - "source": [ - "graph_root_change = pynini.string_map([(\"quatrième\", \"quatre\"),\n", - " (\"cinquième\",\t\"cinq\"),\n", - " (\"neuvième\",\t\"neuf\"),\n", - " (\"onzième\",\t\"onze\"),\n", - " (\"douzième\",\t\"douze\"),\n", - " (\"treizième\",\t\"treize\"),\n", - " (\"quatorzième\",\t\"quatorze\"),\n", - " (\"quinzième\",\t\"quinze\"),\n", - " (\"seizième\",\t\"seize\"),\n", - " (\"trentième\",\t\"trente\"),\n", - " (\"quarantième\",\t\"quarante\"),\n", - " (\"cinquantième\",\t\"cinquante\"),\n", - " (\"soixantième\",\t\"soixante\"),\n", - " (\"millième\",\t\"mille\"),\n", - "])" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "eo2_keFVqaY4" - }, - "source": [ - "We could then concatenate these with a WFST that accepts all tokens with these endings and then change the endings as desired. These will provide the cardinal roots just as effectively. " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "O7I29ezmxylx" - }, - "source": [ - "The same can be said for \"premier/première\" and \"second/seconde\":" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "3JZoz51VyGS6" - }, - "outputs": [], - "source": [ - "graph_firsts = pynini.string_map([(\"premier\", \"un\"),(\"première\", \"un\")])\n", - "graph_seconds = pynini.string_map([(\"second\", \"deux\"),(\"seconde\", \"deux\")])" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "NJ9BGGAwyTQ5" - }, - "source": [ - "*Note: We graph separately to manage their different superscripts later on.*\n", - "\n", - "Depending on your language of focus, the choice of implicitly reversing the root token or explicitly mapping back to root will be the most efficient, but it is worth considering both options if only to check your understanding of the language." 
- ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "8PgVwDRRq9gr" - }, - "source": [ - "Putting our grammar together, we have:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "ko2kAeKwrRSH" - }, - "outputs": [], - "source": [ - "strip_morpheme = pynutil.delete(\"ième\") # deletes suffix\n", - "\n", - "graph_root_change = pynini.string_map([(\"quatrième\", \"quatre\"),\n", - " (\"cinquième\",\t\"cinq\"),\n", - " (\"neuvième\",\t\"neuf\"),\n", - " (\"onzième\",\t\"onze\"),\n", - " (\"douzième\",\t\"douze\"),\n", - " (\"treizième\",\t\"treize\"),\n", - " (\"quatorzième\",\t\"quatorze\"),\n", - " (\"quinzième\",\t\"quinze\"),\n", - " (\"seizième\",\t\"seize\"),\n", - " (\"trentième\",\t\"trente\"),\n", - " (\"quarantième\",\t\"quarante\"),\n", - " (\"cinquantième\",\t\"cinquante\"),\n", - " (\"soixantième\",\t\"soixante\"),\n", - " (\"millième\",\t\"mille\"),\n", - "])\n", - "\n", - "# Component will accept all tokens that end with desired strings\n", - "graph_get_cardinal = NEMO_SIGMA + (strip_morpheme | graph_root_change) \n", - "\n", - "graph_firsts = pynini.string_map([(\"premier\", \"un\"),(\"première\", \"un\")])\n", - "graph_seconds = pynini.string_map([(\"second\", \"deux\"),(\"seconde\", \"deux\")])\n", - "\n", - "graph_get_cardinal = pynini.union(graph_firsts, graph_seconds, graph_get_cardinal) \n", - "\n", - "graph_cardinal = CardinalFst().just_cardinals\n", - "\n", - "graph_ordinal = graph_get_cardinal @ graph_cardinal" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "ESxY3LsCdE8q" - }, - "outputs": [], - "source": [ - "apply_fst(\"sixième\", graph_ordinal)\n", - "apply_fst(\"première\", graph_ordinal)\n", - "apply_fst(\"seconde\", graph_ordinal)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "qo_g8UdoUFJB" - }, - "source": [ - "## Classifier" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "kemhdKAjzEIa" - }, - "source": [ - "Now that we've found a way to pass the work of the Ordinal grammar back onto the Cardinal grammar, we can move on to the Classifier. Like before, we need to inherit from `GraphFst` to properly insert token formatting and required attributes. As well, we will again use the `integer` property to tag our digit string.\n", - "\n", - "Indeed, the only major difference between the Ordinal Classifier and the Cardinal Classifier is the replacement of the optional `negative` attribute with the `morphosyntactic_features` attribute to indicate the superscript function." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "EHM4Y3TW2nXT" - }, - "source": [ - "Since we are relying on the `CardinalFst` class in our grammar, we want to consider how to instantiate it. Since our ultimate goal is to build a Classifier that unites all semiotic classes, it makes sense to simply use the `CardinalFst` that we will need to call for our ITN and pass it as an argument to our new class."
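- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Concretely, once `OrdinalFst` is complete, the sharing pattern will look like the following sketch (we use exactly this pattern in the demonstration further below):" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "cardinal = CardinalFst() # build the cardinal grammar once\n", - "ordinal = OrdinalFst(cardinal).fst # dependent classes reuse its graphs"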
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 273 - }, - "id": "KsmPhWSa3LF_", - "outputId": "9e881ca9-a926-4249-dda8-9c52175569b5" - }, - "outputs": [], - "source": [ - "def __init__(self, cardinal: GraphFst):\n", - " super().__init__(name=\"ordinal\", kind=\"classify\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "CtBQ-udB3S5Q" - }, - "source": [ - "To clear up the namespace, we will now be importing from the NeMo implementation of `CardinalFst` for French." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "L-JAcidf4QQg" - }, - "outputs": [], - "source": [ - "from nemo_text_processing.inverse_text_normalization.fr.taggers.cardinal import CardinalFst\n", - "\n", - "class OrdinalFst(GraphFst):\n", - " def __init__(self, cardinal: GraphFst):\n", - " super().__init__(name=\"ordinal\", kind=\"classify\")\n", - " graph_cardinal = cardinal.graph_no_exception # NeMo equivalent to self.just_cardinals" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "FQfkAqZavCAB" - }, - "source": [ - "We now add in our grammar:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "uUQ4BLuivGut" - }, - "outputs": [], - "source": [ - "class OrdinalFst(GraphFst):\n", - " def __init__(self, cardinal: GraphFst):\n", - " super().__init__(name=\"ordinal\", kind=\"classify\")\n", - " graph_cardinal = cardinal.graph_no_exception # may replace\n", - "\n", - " strip_morpheme = pynutil.delete(\"ième\") # deletes suffix\n", - "\n", - " graph_root_change = pynini.string_map([(\"quatrième\", \"quatre\"),\n", - " (\"cinquième\",\t\"cinq\"),\n", - " (\"neuvième\",\t\"neuf\"),\n", - " (\"onzième\",\t\"onze\"),\n", - " (\"douzième\",\t\"douze\"),\n", - " (\"treizième\",\t\"treize\"),\n", - " (\"quatorzième\",\t\"quatorze\"),\n", - " (\"quinzième\",\t\"quinze\"),\n", - " (\"seizième\",\t\"seize\"),\n", - " (\"trentième\",\t\"trente\"),\n", - " (\"quarantième\",\t\"quarante\"),\n", - " (\"cinquantième\",\t\"cinquante\"),\n", - " (\"soixantième\",\t\"soixante\"),\n", - " (\"millième\",\t\"mille\"),\n", - " ])\n", - " \n", - " # Component will accept all tokens that end with desired strings\n", - " graph_get_cardinal = NEMO_SIGMA + (strip_morpheme | graph_root_change) \n", - "\n", - " graph_firsts = pynini.string_map([(\"premier\", \"un\"),(\"première\", \"un\")])\n", - " graph_seconds = pynini.string_map([(\"second\", \"deux\"),(\"seconde\", \"deux\")])\n", - "\n", - " graph_get_cardinal = pynini.union(graph_firsts, graph_seconds, graph_get_cardinal) \n", - "\n", - " graph_ordinal = graph_get_cardinal @ graph_cardinal\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "F_6EXPRMvnp2" - }, - "source": [ - "Now we come to the `morphosyntactic_features` property - a linguistic term for aspects of a word related to grammar. If intending to deploy your WFST through Sparrowhawk, this is the only ordinal property that is permitted (outside of the universal properties like `preserve_order`) and thus must carry all information regarding how to properly normalize the ordinal. (If Sparrowhawk deployment is not necessary, you may add additional properties to the tag.)\n", - "\n", - "How should we convey this information? Since the Verbalizer will be the main interface for our tags, it really does not matter - so long as we can reliably process the features. 
For the purposes of French, we just need `morphosyntactic_features` to decide the following:\n", - "- Insert the specific superscripts for \"premier/première\" or \"second/seconde\"\n", - "- Insert \"ᵉ\" otherwise\n", - "\n", - "We will also introduce another aspect of French Ordinals: they can be either plural or singular, identified by the suffix \"s\" on input and superscript \"ˢ\" on output. As such, our `morphosyntactic_features` should also decide the additional property:\n", - "- Insert the plural superscript " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "atctz6p-2GtV" - }, - "source": [ - "Since the default superscript is nearly universal, we will just specify this in our WFST and focus on the second and first ordinals as specific cases. We will create a `graph_morpheme` component that inserts the default superscript - indicated with a standard \"e\" to avoid possible encoding issues. We will then append a WFST that will graph any possible plural marker - \"s\" - as part of the `morphosyntactic_features`. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "ui99osyP2UuQ" - }, - "outputs": [], - "source": [ - "graph_morpheme = pynutil.insert(\"e\") # Insert e superscript\n", - "graph_plural = pynini.closure(pynini.accep(\"s\"), 0, 1) # We create an acceptor since we must process the possible \"s\"\n", - "\n", - "graph_morpheme_component = graph_morpheme + graph_plural\n", - "\n", - "graph_morphosyntactic_features = (pynutil.insert(\" morphosyntactic_features: \\\"\") \n", - " + graph_morpheme_component\n", - " )" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "QAlqubA25gq0" - }, - "source": [ - "Introducing the `integer` feature:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "rs2TyIBc5la6" - }, - "outputs": [], - "source": [ - "graph_reg_ordinals = graph_get_cardinal @ graph_cardinal # Maps regular ordinals to cardinals; the first and second ordinals are handled separately.\n", - "\n", - "graph_ordinal = pynutil.insert(\"integer: \\\"\") + graph_reg_ordinals + pynutil.insert(\"\\\"\")\n", - "graph_ordinal += graph_morphosyntactic_features" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "xoqk20Pi2gT8" - }, - "source": [ - "For the first and second ordinals, we can explicitly state their mappings, as these occurrences are invariable. (First and second ordinals do not need to accommodate being the endings of other terms.) As such, we can just have mappings from the token to the superscripts."
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "54aqdH_P63Ea" - }, - "outputs": [], - "source": [ - "firsts = pynini.string_map([(\"premier\", \"er\"), (\"première\",\"re\")])\n", - "firsts += graph_plural # Still accepts plural marker in superscript\n", - "seconds = pynini.string_map([(\"second\", \"d\"),(\"seconde\", \"de\")])\n", - "seconds += graph_plural \n", - "\n", - "graph_firsts = pynutil.insert(\"integer: \\\"1\\\" morphosyntactic_features: \\\"\") + firsts\n", - "graph_seconds = pynutil.insert(\"integer: \\\"2\\\" morphosyntactic_features: \\\"\") + seconds" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "D2vQ4m7o7p84" - }, - "source": [ - "Placing them in our class:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "w_JKT8JMf-Mz" - }, - "outputs": [], - "source": [ - "class OrdinalFst(GraphFst):\n", - " def __init__(self, cardinal: GraphFst):\n", - " super().__init__(name=\"ordinal\", kind=\"classify\")\n", - " graph_cardinal = cardinal.graph_no_exception # may replace\n", - "\n", - " strip_morpheme = pynutil.delete(\"ième\") # deletes suffix\n", - "\n", - " graph_root_change = pynini.string_map([(\"quatrième\", \"quatre\"),\n", - " (\"cinquième\",\t\"cinq\"),\n", - " (\"neuvième\",\t\"neuf\"),\n", - " (\"onzième\",\t\"onze\"),\n", - " (\"douzième\",\t\"douze\"),\n", - " (\"treizième\",\t\"treize\"),\n", - " (\"quatorzième\",\t\"quatorze\"),\n", - " (\"quinzième\",\t\"quinze\"),\n", - " (\"seizième\",\t\"seize\"),\n", - " (\"trentième\",\t\"trente\"),\n", - " (\"quarantième\",\t\"quarante\"),\n", - " (\"cinquantième\",\t\"cinquante\"),\n", - " (\"soixantième\",\t\"soixante\"),\n", - " (\"millième\",\t\"mille\"),\n", - " ])\n", - " \n", - " # Component will accept all tokens that end with desired strings\n", - " graph_get_cardinal = NEMO_SIGMA + (strip_morpheme | graph_root_change) \n", - "\n", - " # Graph will map ordinals beyond second ordinal to their cardinals\n", - " graph_reg_ordinals = graph_get_cardinal @ graph_cardinal\n", - "\n", - " # Graphing morphosyntactic_features\n", - " graph_morpheme = pynutil.insert(\"e\") # Insert e superscript\n", - " graph_plural = pynini.accep(\"s\").ques # ques is equivalent to pynini.closure(, 0, 1)\n", - "\n", - " graph_morpheme_component = graph_morpheme + graph_plural\n", - "\n", - " graph_morphosyntactic_features = (pynutil.insert(\" morphosyntactic_features: \\\"\") \n", - " + graph_morpheme_component\n", - " )\n", - "\n", - " # Adding in the `integer` property:\n", - " graph_ordinal = pynutil.insert(\"integer: \\\"\") + graph_reg_ordinals + pynutil.insert(\"\\\"\")\n", - " graph_ordinal += graph_morphosyntactic_features \n", - "\n", - " # Case of first and second ordinals\n", - " firsts = pynini.string_map([(\"premier\", \"er\"), (\"première\",\"re\")])\n", - " firsts += graph_plural # Still accepts plural marker in superscript\n", - " seconds = pynini.string_map([(\"second\", \"d\"),(\"seconde\", \"de\")])\n", - " seconds += graph_plural \n", - "\n", - " graph_firsts = pynutil.insert(\"integer: \\\"1\\\" morphosyntactic_features: \\\"\") + firsts\n", - " graph_seconds = pynutil.insert(\"integer: \\\"2\\\" morphosyntactic_features: \\\"\") + seconds\n", - "\n", - " # All together\n", - " graph_ordinal = pynini.union(graph_ordinal, graph_firsts, graph_seconds)\n", - " self.fst = graph_ordinal.optimize()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "CpGHVg6chmA0" - }, - "source": [ - "Trying out on some examples:" - ] - 
}, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "b5DL3PZRhpc8" - }, - "outputs": [], - "source": [ - "cardinal = CardinalFst()\n", - "ordinal = OrdinalFst(cardinal).fst\n", - "\n", - "apply_fst(\"premier\", ordinal)\n", - "apply_fst(\"premiers\", ordinal)\n", - "apply_fst(\"seconde\", ordinal)\n", - "apply_fst(\"douzièmes\", ordinal)\n", - "apply_fst(\"cent-cinquièmes\", ordinal)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "MNQVgiv-UK29" - }, - "source": [ - "### Special Tokens" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "UdiNAHGh71O9" - }, - "source": [ - "If you are particularly astute, you may have noticed that we have not closed the quotations around the `morphosyntactic_features` throughout, despite doing so for `integer`. This is not a typo, as there is one more aspect of the Classifier that must be addressed: special cases.\n", - "\n", - "For your language, you may notice that there are occasional exceptions to writing rules that are signaled by a specific vocabulary token in a string. As this must be communicated to our Verbalizer, it is important that we signal this vocabulary through our Classifier. \n", - "\n", - "For French, this can occur in the normalization of centuries. When using Ordinals to indicate centuries, French commonly writes with Roman numerals. For example:\n", - "- \"Fifth century\" -> \"cinquième siècle\" -> `Vᵉ siècle` \n", - "- \"Twentieth century\" -> \"vingtième siècle\" -> `XXᵉ siècle` \n", - "\n", - "As such, we must allow our Classifier to pass on the information that \"siècle\" follows an ordinal to our Verbalizer, so it may normalize with Roman numerals. We accomplish this by appending a WFST that accepts special tokens that follow our Ordinals, adding them to our `morphosyntactic_features` attribute with a forward slash as a delimiter." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "MsWnT4BfQKcC" - }, - "outputs": [], - "source": [ - "special_tokens = pynini.accep(\"siècle\")\n", - "\n", - "graph_special_tokens = delete_space + pynutil.insert(\"/\") + special_tokens # We need to delete the space between the ordinal and this token.\n", - "graph_special_tokens = pynini.closure(graph_special_tokens, 0, 1)\n", - "\n", - "graph_ordinal += graph_special_tokens + pynutil.insert(\"\\\"\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "698_n5SFQ_jP" - }, - "source": [ - "*Once again, it is advised to retain a tsv file in `data` to quickly append these keywords.*\n", - "\n", - "Having taken care of the special case, we may now call `add_tokens` and complete the graph (fully written out below)."
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "nZ1dkft0Riou" - }, - "outputs": [], - "source": [ - "class OrdinalFst(GraphFst):\n", - " def __init__(self, cardinal: GraphFst):\n", - " super().__init__(name=\"ordinal\", kind=\"classify\")\n", - " graph_cardinal = cardinal.graph_no_exception # may replace\n", - "\n", - " strip_morpheme = pynutil.delete(\"ième\") # deletes suffix\n", - "\n", - " graph_root_change = pynini.string_map([(\"quatrième\", \"quatre\"),\n", - " (\"cinquième\",\t\"cinq\"),\n", - " (\"neuvième\",\t\"neuf\"),\n", - " (\"onzième\",\t\"onze\"),\n", - " (\"douzième\",\t\"douze\"),\n", - " (\"treizième\",\t\"treize\"),\n", - " (\"quatorzième\",\t\"quatorze\"),\n", - " (\"quinzième\",\t\"quinze\"),\n", - " (\"seizième\",\t\"seize\"),\n", - " (\"trentième\",\t\"trente\"),\n", - " (\"quarantième\",\t\"quarante\"),\n", - " (\"cinquantième\",\t\"cinquante\"),\n", - " (\"soixantième\",\t\"soixante\"),\n", - " (\"millième\",\t\"mille\"),\n", - " ])\n", - " \n", - " # Component will accept all tokens that end with desired strings\n", - " graph_get_cardinal = NEMO_SIGMA + (strip_morpheme | graph_root_change) \n", - "\n", - " # Graph will map ordinals beyond second ordinal to their cardinals\n", - " graph_reg_ordinals = graph_get_cardinal @ graph_cardinal\n", - "\n", - " # Graphing morphosyntactic_features\n", - " graph_morpheme = pynutil.insert(\"e\") # Insert e superscript\n", - " graph_plural = pynini.accep(\"s\").ques # We create an acceptor since we must process the possible \"s\"\n", - "\n", - " graph_morpheme_component = graph_morpheme + graph_plural\n", - "\n", - " graph_morphosyntactic_features = (pynutil.insert(\" morphosyntactic_features: \\\"\") \n", - " + graph_morpheme_component\n", - " )\n", - "\n", - " # Adding in the `integer` property:\n", - " graph_ordinal = pynutil.insert(\"integer: \\\"\") + graph_reg_ordinals + pynutil.insert(\"\\\"\")\n", - " graph_ordinal += graph_morphosyntactic_features \n", - "\n", - " # Case of first and second ordinals\n", - " firsts = pynini.string_map([(\"premier\", \"er\"), (\"première\",\"re\")])\n", - " firsts += graph_plural # Still accepts plural marker in superscript\n", - " seconds = pynini.string_map([(\"second\", \"d\"),(\"seconde\", \"de\")])\n", - " seconds += graph_plural \n", - "\n", - " graph_firsts = pynutil.insert(\"integer: \\\"1\\\" morphosyntactic_features: \\\"\") + firsts\n", - " graph_seconds = pynutil.insert(\"integer: \\\"2\\\" morphosyntactic_features: \\\"\") + seconds\n", - "\n", - "\n", - " # Special tokens\n", - " special_tokens = pynini.accep(\"siècle\")\n", - "\n", - " graph_special_tokens = delete_space + pynutil.insert(\"/\") + special_tokens # We need to delete the space in between this token and the following one.\n", - " graph_special_tokens = pynini.closure(graph_special_tokens, 0, 1)\n", - "\n", - " graph_ordinal += graph_special_tokens + pynutil.insert(\"\\\"\")\n", - "\n", - " # Finishing\n", - " graph_ordinal = self.add_tokens(graph_ordinal)\n", - " self.fst = graph_ordinal.optimize()\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "7a4zBo-YS1QD" - }, - "source": [ - "## Verbalizer" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "zYbrcGyGS2rW" - }, - "source": [ - "The initial part of the Ordinal Verbalizer is similar to the Cardinal WFST: we simply need to build a Verbalizer that inherits from `GraphFST` and removes the `integer` property tag. 
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "KUv99A_rYjb9" - }, - "outputs": [], - "source": [ - "class OrdinalFst(GraphFst):\n", - " def __init__(self):\n", - " super().__init__(name=\"ordinal\", kind=\"verbalize\")\n", - " graph_integer = (\n", - " pynutil.delete(\"integer:\")\n", - " + delete_space\n", - " + pynutil.delete(\"\\\"\")\n", - " + pynini.closure(NEMO_DIGIT, 1)\n", - " + pynutil.delete(\"\\\"\")\n", - " )" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "zKCt_EapZXGW" - }, - "source": [ - "Now we need to manage the `morphosyntactic_features` component. The first steps seem simple enough: delete the property tag and replace the superscript indicators with the actual superscripts. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "yoa_mXMLabrU" - }, - "outputs": [], - "source": [ - " # Create mappings for all superscripts\n", - " superscript = pynini.union(\n", - " pynini.cross(\"e\", \"ᵉ\"), # only delete first quote since there may be more features\n", - " pynini.cross(\"d\", \"ᵈ\"),\n", - " pynini.cross(\"r\", \"ʳ\"),\n", - " pynini.cross(\"s\", \"ˢ\"),\n", - " )\n", - "\n", - " # Append to deletion of feature property. Note that we use plus closure for multiple superscripts.\n", - " graph_morphosyntactic_features = pynutil.delete(\" morphosyntactic_features: \\\"\") + superscript.plus" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "xOA7_MsUrSJS" - }, - "source": [ - "### Romanization" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "K_SaG0DUa2t7" - }, - "source": [ - "Now we come to the possible Romanization component. Since we need to graph the superscript components as following the number, we want to design our graph so that `morphosyntactic_features` is the last component of the graph. However, we do not know that we need Romanization until we see the `morphosyntactic_features` component. As such, we need to design our graph such that two options are available initially for an input, but only one allows full traversal." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "7dalc-tablG-" - }, - "source": [ - "![romanization.png](images/romanization.PNG)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "mPTNCddNcEEE" - }, - "source": [ - "In cases where your WFST decisions are dependent on latter parts of an input string, permitting the union of two separate paths when only one is valid usually assists, as a standard pathing heuristic will only choose the valid path. \n", - "\n", - "In the case of French, this would require us to separate our Verbalizer into two parts: one for Arabic numerals and one for Roman numerals. For the Arabic WFST, we simply conclude the graph. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "0YSy1PYOcuyD" - }, - "outputs": [], - "source": [ - "graph_integer = (\n", - " pynutil.delete(\"integer:\")\n", - " + delete_space\n", - " + pynutil.delete(\"\\\"\")\n", - " + pynini.closure(NEMO_DIGIT, 1)\n", - " + pynutil.delete(\"\\\"\")\n", - " )\n", - "graph_Arabic = graph_integer + graph_morphosyntactic_features + pynutil.delete(\"\\\"\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "nnXjUU5Pf7Sh" - }, - "source": [ - "For the Roman graph, things get a bit trickier. Ideally, we would want to build a WFST that maps each digit of `graph_Arabic` to a Roman equivalent. 
However, consider the following examples:\n", - "- 1 -> I\n", - "- 10 -> X\n", - "- 11 -> XI\n", - "- 100 -> C\n", - "- 101 -> CI\n", - "- 110 -> CX\n", - "- 111 -> CXI\n", - "\n", - "Since Roman numerals do not preserve powers of ten through digit placement, we will need to design separate FSTs for each digit position and apply them accordingly. As this can quickly become intensive, we will only work to enumerate the Ordinals from 1 to 99. (Note: We are doing this to accommodate centuries; there is little likelihood that any century beyond the 99th will be used in regular strings.)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "3-fQHMc2iQrz" - }, - "source": [ - "First we design our graphs for converting from Arabic to Roman numerals:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "d6PDySykiXTh" - }, - "outputs": [], - "source": [ - "digits = pynini.string_map([(\"1\", \"I\"),\n", - " (\"2\",\t\"II\"),\n", - " (\"3\",\t\"III\"),\n", - " (\"4\",\t\"IV\"),\n", - " (\"5\",\t\"V\"),\n", - " (\"6\",\t\"VI\"),\n", - " (\"7\",\t\"VII\"),\n", - " (\"8\",\t\"VIII\"),\n", - " (\"9\",\t\"IX\"),\n", - " ])\n", - "tens = pynini.string_map([(\"1\", \"X\"),\n", - " (\"2\",\t\"XX\"),\n", - " (\"3\",\t\"XXX\"),\n", - " (\"4\",\t\"XL\"),\n", - " (\"5\",\t\"L\"),\n", - " (\"6\",\t\"LX\"),\n", - " (\"7\",\t\"LXX\"),\n", - " (\"8\",\t\"LXXX\"),\n", - " (\"9\",\t\"XC\"),\n", - " ])\n", - "zero = pynutil.delete(\"0\") # No Roman representation for zero." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "wb-LmwJdk59m" - }, - "source": [ - "Now we build two separate filters: one will accept only single-digit Arabic numerals and the other will accept two-digit Arabic numerals. For this we can use `NEMO_DIGIT`:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "DW3oD7Hbli2X" - }, - "outputs": [], - "source": [ - "map_one_digit = NEMO_DIGIT\n", - "map_two_digits = NEMO_DIGIT ** 2 # pynini overloads the exponent function to allow self-concatenation."
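- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As a quick check that these pieces line up, composing the two-digit filter onto `tens + (digits | zero)` should yield, for example (expected outputs as comments):" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "apply_fst(\"42\", map_two_digits @ (tens + (digits | zero))) # expected: XLII\n", - "apply_fst(\"70\", map_two_digits @ (tens + (digits | zero))) # expected: LXX"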
- ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "xtYKLy9AmJZS" - }, - "source": [ - "We now build mappings between two-digit Arabic numerals and Roman numerals, composing them onto the filters:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "dUy7uEUXmT_g" - }, - "outputs": [], - "source": [ - "graph_one_digit_romans = map_one_digit @ digits\n", - "\n", - "graph_two_digit_romans = tens + (digits | zero)\n", - "graph_two_digit_romans = map_two_digits @ graph_two_digit_romans\n", - "\n", - "graph_romans = graph_one_digit_romans | graph_two_digit_romans" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "JEinyAMdm7RJ" - }, - "source": [ - "We now compose onto `graph_integer` and take care of the occurrence of \"siècle\":" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "ERO19BbynPNX" - }, - "outputs": [], - "source": [ - "graph_romans = (graph_integer @ graph_romans) + graph_morphosyntactic_features\n", - "graph_romans += pynini.cross(\"/\", \" \") + \"siècle\" + pynutil.delete(\"\\\"\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "zN-fwrCGoToQ" - }, - "source": [ - "We finalize with a union and a call to `delete_tokens`; the complete Verbalizer is now:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "kr2wcToAofWB" - }, - "outputs": [], - "source": [ - "class OrdinalFst(GraphFst):\n", - " def __init__(self):\n", - " super().__init__(name=\"ordinal\", kind=\"verbalize\")\n", - "\n", - " # Maps integer and removes attribute\n", - " graph_integer = (\n", - " pynutil.delete(\"integer:\")\n", - " + delete_space\n", - " + pynutil.delete(\"\\\"\")\n", - " + pynini.closure(NEMO_DIGIT, 1)\n", - " + pynutil.delete(\"\\\"\")\n", - " )\n", - "\n", - " # Create mappings for all superscripts\n", - " superscript = pynini.union(\n", - " pynini.cross(\"e\", \"ᵉ\"), # maps each feature marker to its superscript\n", - " pynini.cross(\"d\", \"ᵈ\"),\n", - " pynini.cross(\"r\", \"ʳ\"),\n", - " pynini.cross(\"s\", \"ˢ\"),\n", - " )\n", - "\n", - " # Append to deletion of feature property. 
Note that we use plus closure for multiple superscripts.\n", - " graph_morphosyntactic_features = pynutil.delete(\" morphosyntactic_features: \\\"\") + superscript.plus\n", - "\n", - " # Writing WFST for Arabic\n", - " graph_Arabic = graph_integer + graph_morphosyntactic_features + pynutil.delete(\"\\\"\")\n", - "\n", - " # Mapping Roman numerals\n", - " digits = pynini.string_map([(\"1\", \"I\"),\n", - " (\"2\",\t\"II\"),\n", - " (\"3\",\t\"III\"),\n", - " (\"4\",\t\"IV\"),\n", - " (\"5\",\t\"V\"),\n", - " (\"6\",\t\"VI\"),\n", - " (\"7\",\t\"VII\"),\n", - " (\"8\",\t\"VIII\"),\n", - " (\"9\",\t\"IX\"),\n", - " ])\n", - " tens = pynini.string_map([(\"1\", \"X\"),\n", - " (\"2\",\t\"XX\"),\n", - " (\"3\",\t\"XXX\"),\n", - " (\"4\",\t\"XL\"),\n", - " (\"5\",\t\"L\"),\n", - " (\"6\",\t\"LX\"),\n", - " (\"7\",\t\"LXX\"),\n", - " (\"8\",\t\"LXXX\"),\n", - " (\"9\",\t\"XC\"),\n", - " ])\n", - " zero = pynutil.delete(\"0\") # No Roman representation for zero.\n", - "\n", - " # filters for Roman digits\n", - " map_one_digit = NEMO_DIGIT\n", - " map_two_digits = NEMO_DIGIT ** 2 # pynini overloads the exponent function to allow self-concatenation.\n", - "\n", - " # Composing onto roman digits\n", - " graph_one_digit_romans = NEMO_DIGIT @ digits\n", - "\n", - " graph_two_digit_romans = tens + (digits | zero)\n", - " graph_two_digit_romans = map_two_digits @ graph_two_digit_romans\n", - "\n", - " graph_romans = graph_one_digit_romans | graph_two_digit_romans\n", - "\n", - " # Writing WFST for Roman\n", - " graph_romans = (graph_integer @ graph_romans) + graph_morphosyntactic_features\n", - " graph_romans += pynini.cross(\"/\", \" \") + \"siècle\" + pynutil.delete(\"\\\"\")\n", - "\n", - " # Final composition\n", - " graph = (graph_romans | graph_Arabic)\n", - "\n", - " delete_tokens = self.delete_tokens(graph)\n", - " self.fst = delete_tokens.optimize()\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Trying out our examples:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "example_regular = 'ordinal { integer: \"12\" morphosyntactic_features: \"es\" }'\n", - "example_roman = 'ordinal { integer: \"12\" morphosyntactic_features: \"es/siècle\" }'\n", - "\n", - "fst = OrdinalFst().fst\n", - "\n", - "apply_fst(example_regular, fst)\n", - "apply_fst(example_roman, fst)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "yBgLhTq9pWZe" - }, - "source": [ - "We have now completed an Ordinal WFST from the ground up, allowing a separate numbering system for special cases." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "-W1-BMVJUXXk" - }, - "source": [ - "## Final notes" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "kR7E64P4pPU_" - }, - "source": [ - "Before moving on, there are some key takeaways that you may find useful for most (if not all) languages:\n", - "- Many ordinal systems rely on alteration of Cardinals. Even in the example of Korean, it is using a pre-existing counting system and adding a suffix to indicate ordering. 
As such, your Ordinal WFST will likely follow this tutorial's structure of changing the Ordinal to its original root and then relying on your Cardinal WFST for the majority of processing.\n", - "- The `morphosyntactic_features` property will carry the vast majority of information necessary for normalization through your Verbalizer.\n", - "- While not all writing systems have the same quirk as using Roman numerals in reference to centuries, you will likely find cases in your language where a specific token indicates unique rules for a semiotic class. Carrying this information to the Verbalizer is usually the simplest means of preserving the token while also facilitating normalization. " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Rx8-LuJOUaa5" - }, - "source": [ - "# Decimal WFST " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "D2MRXYxz8TGA" - }, - "source": [ - "\n", - "If the Cardinal WFST is the most crucial element of a normalization grammar, the construction of the Decimal WFST is a close second. Much like in the case of constructing Ordinals from Cardinal grammars, many aspects of the Decimal WFST will be reused throughout your other semiotic classes.\n", - "\n", - "To get started, you should study the numerical conventions in your language. In particular, you should take note of the following:\n", - "- How is the decimal component of a number pronounced in your language of focus? (e.g. The English number `1.33` can be verbalized as \"one point three three\" or \"one and thirty-three hundredths.\")\n", - "- What is the punctuation mark used for decimal demarcation? (In North America, several writing systems use `.` while European nations use `,`.)\n", - "- Are there general rules regarding pronunciation/formatting of numbers past the decimal demarcation? (e.g. Does your language pronounce each digit or pronounce as a series of three-digit numbers?)\n", - "\n", - "Such questions will likely require some deep familiarity with the language, and it may help to ask a native speaker for input. Of course, the level of depth is dependent on your needs, but researching these questions will help your normalization system appear more organic." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "UsK78ib4N-gb" - }, - "source": [ - "## Grammar" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "p4CLOOA9OAwZ" - }, - "source": [ - "In the case of French, we have the following guidelines:\n", - "- French uses the comma ( `,` ) for decimal delineation. It is articulated as \"virgule\".\n", - "- Decimals can be read as a series of digits or grouped as Cardinal numbers arbitrarily. (e.g. `.333` can be \"virgule trois trois trois\" or \"virgule trois-cent-trente-trois\".) \n", - "\n", - "As such, our grammar needs to accommodate the following pattern: \n", - "\n", - "`cardinal + \"virgule\" + string_of_cardinals`\n", - "\n", - "Given our experience with our previous WFSTs, this seems simple enough. 
We assume we have an instance of `CardinalFst` available and create a subcomponent to map the integer portion of a decimal:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "XSp9FTzhf0XZ" - }, - "outputs": [], - "source": [ - "cardinal = CardinalFst().graph_no_exception # NeMo equivalent of just_cardinals\n", - "\n", - "# place cardinal under closure so the integer part may be omitted (values < 1)\n", - "graph_integer = pynini.closure(cardinal, 0, 1)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "bk3_3iawgAZE" - }, - "source": [ - "We then concatenate a subcomponent that detects and deletes the delineator \"virgule\":" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "UMzfAKkngH6z" - }, - "outputs": [], - "source": [ - "delete_virgule = pynutil.delete(\"virgule\")\n", - "graph_decimal = graph_integer + delete_space + delete_virgule" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "GXjbtbLYgn17" - }, - "source": [ - "And permit the occurrence of several strings of cardinals to follow:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "LMMNBJz8gtTA" - }, - "outputs": [], - "source": [ - "graph_string_of_cardinals = delete_space + cardinal\n", - "graph_string_of_cardinals = pynini.closure(graph_string_of_cardinals, 1)\n", - "\n", - "graph_decimal += graph_string_of_cardinals" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "jTgnRLddhGdE" - }, - "source": [ - "Let us try an example:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "D4rjDh0ShJAp" - }, - "outputs": [], - "source": [ - "example = \"trois virgule trois cinquante-cinq\" \n", - "apply_fst(example, graph_decimal) # Should output only the cardinals in the string" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "RfD1d9JOioyl" - }, - "source": [ - "### Ambiguity?" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "3IaI1mCIe_6i" - }, - "source": [ - "Note that our decision to include multiple strings of cardinals after the decimal marker has introduced some ambiguity into our WFST. Consider if a decimal number was followed by an integer series (e.g. `2.5, 5, 6`). Now what should be an application of one DecimalFst and two applications of a CardinalFst can be interpreted as a single DecimalFst application (e.g. `2.556`). What can be done?\n", - "\n", - "While we will address this in greater depth later (see [Tokenize and Classify](#tokenize-and-classify)), the short answer is that cases such as these must be calibrated according to use and linguistic intuition. As this is an inherent ambiguity in the language and its writing system, we can never truly remove this possibility without restricting our ability to model the language. However, we can rely on a few logical assumptions to guide our decision making:\n", - "- Unless the grammar is deployed in a restrictive setting (e.g. a financial environment, or one where strings of numbers are often read in series), it's not likely for a valid string to exhibit this level of ambiguity. Speakers typically try to reduce possible ambiguity in their language production and would likely rephrase to avoid issues such as these. [See Grice's maxims](https://en.wikipedia.org/wiki/Cooperative_principle).\n", - "- While a language may allow a specific string by *rule*, speakers may typically avoid them *in practice* due to conventions or difficulty. 
In our case, while it may be possible to read `2,100 05` as \"deux virgule dix-mille-cinq\" (\"two point ten-thousand and five\"), it's dubious that a speaker would find it easier to read than \"deux virgule une zéro zéro zéro cinq\". (The place values of large strings tend to take longer to recognize.)\n", - "\n", - "While hardly satisfying, these two points will allow us to dismiss *some* worry. Since the former observation is outside our grammar's ability to manage, we accommodate the latter point by using an alternate WFST from our `CardinalFst`: `numbers_up_to_million`. (To utilize in your own language, create a WFST in the Cardinal class right before building up to `graph_millions`. Again, calling `optimize` is advised.)\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "piNe1AWspa4J" - }, - "outputs": [], - "source": [ - "cardinal = CardinalFst().numbers_up_to_million\n", - "\n", - "# place cardinal under closure so the integer part may be omitted (values < 1)\n", - "graph_integer = pynini.closure(cardinal, 0, 1)\n", - "\n", - "delete_virgule = pynutil.delete(\"virgule\")\n", - "graph_decimal = graph_integer + delete_space + delete_virgule\n", - "\n", - "graph_string_of_cardinals = delete_space + cardinal\n", - "graph_string_of_cardinals = pynini.closure(graph_string_of_cardinals, 1)\n", - "\n", - "graph_decimal += graph_string_of_cardinals" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "B1gglt0tfM5V" - }, - "source": [ - "## Classifier" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "fVkOWkncgOZc" - }, - "source": [ - "Like with our previous WFSTs, the main duty for the classifier is inserting the necessary properties for the semiotic token. For the `decimal` tag, the following properties are used:\n", - "- `integer_part` - indicates the value before the decimal marker\n", - "- `fractional_part` - indicates the values after the decimal marker\n", - "- `negative` - indicates if the value is positive or negative (optional)\n", - "- `quantity` - designates if the decimal refers to a specific quantity (see Quantities)\n", - "\n", - "We can begin by inserting the `integer_part` around our `cardinal` subcomponent and the `fractional_part` around our `graph_string_of_cardinals`:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "_zw_cDszh-fB" - }, - "outputs": [], - "source": [ - "graph_integer = pynutil.insert(\"integer_part: \\\"\") + cardinal + pynutil.insert(\"\\\" \")\n", - "graph_fractional = pynutil.insert(\"fractional_part: \\\"\") + graph_string_of_cardinals + pynutil.insert(\"\\\"\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "bxlnn_7tiQMn" - }, - "source": [ - "We then concatenate them together with a component that recognizes and removes the decimal separator." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "BxNS9_AwiWHf" - }, - "outputs": [], - "source": [ - "graph_integer_or_none = graph_integer | pynutil.insert(\"integer_part: \\\"0\\\" \", weight=.1) # In cases we don't always have an integer preceding\n", - "graph_decimal_no_sign = graph_integer_or_none + delete_space + pynutil.delete(\"virgule\") + graph_fractional" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "b7uGfsi4i5UI" - }, - "source": [ - "*Note that we allow insertion of 0 if there is no integer, to accommodate reading of decimal-only values.*\n", - "\n", - "Now we allow the possibility of negative values. 
(Recall French uses \"moins\" to indicate the negative.)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "VsP79naojQZR" - }, - "outputs": [], - "source": [ - "graph_negative = pynini.cross(\"moins\", \"negative: \\\"-\\\" \") + delete_space\n", - "graph_decimal = graph_negative + graph_decimal_no_sign" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "QTcvq5HqllqW" - }, - "outputs": [], - "source": [ - "example = \"moins deux virgule cent-quatre\"\n", - "apply_fst(example, graph_decimal)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "FVKuGj_9mZ75" - }, - "source": [ - "Placing within a `DecimalFst` class, we have:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "tXwr32ermesp" - }, - "outputs": [], - "source": [ - "class DecimalFst(GraphFst):\n", - " def __init__(self, cardinal: GraphFst):\n", - " super().__init__(name=\"decimal\", kind=\"classify\")\n", - " cardinal = cardinal.numbers_up_to_million\n", - " delete_virgule = pynutil.delete(\"virgule\")\n", - "\n", - " graph_integer = pynutil.insert(\"integer_part: \\\"\") + cardinal + pynutil.insert(\"\\\" \") + delete_space\n", - " graph_integer_or_none = graph_integer | pynutil.insert(\"integer_part: \\\"0\\\" \", weight=.001) # In cases we don't always have an integer preceding\n", - "\n", - " graph_string_of_cardinals = delete_space + cardinal\n", - " graph_string_of_cardinals = pynini.closure(graph_string_of_cardinals, 1)\n", - " graph_fractional = pynutil.insert(\"fractional_part: \\\"\") + graph_string_of_cardinals + pynutil.insert(\"\\\"\")\n", - "\n", - " graph_decimal_no_sign = graph_integer_or_none + pynutil.delete(\"virgule\") + graph_fractional \n", - "\n", - " graph_negative = pynini.cross(\"moins\", \"negative: \\\"-\\\" \") + delete_space\n", - " graph_negative = pynini.closure(graph_negative, 0, 1)\n", - "\n", - " graph_decimal = graph_negative + graph_decimal_no_sign\n", - "\n", - " graph = self.add_tokens(graph_decimal)\n", - " self.fst = graph.optimize()\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "gjxI5mEKfHLo" - }, - "source": [ - "### Quantities" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "3WuwWPf3py7G" - }, - "source": [ - "Recalling our earlier remarks regarding convention in language use, you may find a need to adjust the DecimalFst when processing specific values. For instance, consider the following equivalencies from English:\n", - "- `1,500,000` = \"one million five hundred thousand\" = \"one point five million\" = `1.5 million`\n", - "- `2,750,000` = \"two million seven hundred and fifty thousand\" = \"two point seven five million\" = `2.75 million`\n", - "\n", - "For large numbers, there is a tendency to use the decimal system as though one is describing a quantity. Notably, there is a minimum value for which this is comfortable. (A speaker of English may say \"three point five trillion\" but \"three point five hundred\" comes off as odd.)\n", - "\n", - "This behavior can occur in other languages. For example, the amount of `$1,500,000` may be read in French as \"une virgule cinq million de dollars\" (\"one point five million dollars\"). " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "RgMBIKlYdsGz" - }, - "source": [ - "Our Classifier can be made to accommodate this behavior: we simply need to repeat what we did for `OrdinalFst` and set aside several key terms to trigger our model. 
For French, we will choose all terms added for values greater than a million. (Chosen empirically.)\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "vEcsUXw5fUEe"
- },
- "outputs": [],
- "source": [
- "suffix = pynini.union(\n",
- "    \"million\",\n",
- "    \"millions\",\n",
- "    \"milliard\",\n",
- "    \"milliards\",\n",
- "    \"billion\",\n",
- "    \"billions\",\n",
- "    \"billiard\",\n",
- "    \"billiards\",\n",
- "    \"trillion\",\n",
- "    \"trillions\",\n",
- "    \"trilliard\",\n",
- "    \"trilliards\",\n",
- "    )"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "wIIUAsR-fgQA"
- },
- "source": [
- "We will then need to use a WFST to graph any numbers that precede these amounts. Note, unlike for our `DecimalFst`, we need to permit cardinals as well as decimals. This is because we want to be able to normalize a phrase like \"three million\" to `3 million` as this will be less obtrusive than `3,000,000`.\n",
- "\n",
- "As such, we will call on a `CardinalFst` and a `DecimalFst` for `get_quantity`. Since these are both utilized for our `DecimalFst`, it would be more efficient to just pass them along as function/class variables."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "yern-idtycWg"
- },
- "outputs": [],
- "source": [
- "def get_quantity(decimal, cardinal_up_to_thousand):\n",
- "    suffix = pynini.union(\n",
- "        \"million\",\n",
- "        \"millions\",\n",
- "        \"milliard\",\n",
- "        \"milliards\",\n",
- "        \"billion\",\n",
- "        \"billions\",\n",
- "        \"billiard\",\n",
- "        \"billiards\",\n",
- "        \"trillion\",\n",
- "        \"trillions\",\n",
- "        \"trilliard\",\n",
- "        \"trilliards\",\n",
- "    )\n",
- "    # The French WFST that this borrows from has not removed leading zeroes yet.\n",
- "    numbers = cardinal_up_to_thousand @ (\n",
- "        pynutil.delete(pynini.closure(\"0\")) + pynini.difference(NEMO_DIGIT, \"0\") + pynini.closure(NEMO_DIGIT)\n",
- "    )\n",
- "    res = (\n",
- "        pynutil.insert(\"integer_part: \\\"\")\n",
- "        + numbers\n",
- "        + pynutil.insert(\"\\\"\")\n",
- "        + (\n",
- "            pynini.union(delete_hyphen, delete_extra_space)\n",
- "        ) # Can be written either as 'deux-millions' or 'deux millions' depending on whether it registers as a noun or part of cardinal.\n",
- "        + pynutil.insert(\" quantity: \\\"\")\n",
- "        + suffix\n",
- "        + pynutil.insert(\"\\\"\")\n",
- "    )\n",
- "    # Union with decimal to permit either a cardinal or decimal representation.\n",
- "    res |= decimal + delete_extra_space + pynutil.insert(\" quantity: \\\"\") + suffix + pynutil.insert(\"\\\"\")\n",
- "    return res"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "uT4LMo8ADBAq"
- },
- "source": [
- "We can now insert this into our Classifier, producing the following:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "d2KrCuyGDLwh"
- },
- "outputs": [],
- "source": [
- "class DecimalFst(GraphFst):\n",
- "    def __init__(self, cardinal: GraphFst):\n",
- "        super().__init__(name=\"decimal\", kind=\"classify\")\n",
- "        quantities_cardinal = cardinal.graph_hundreds_component_at_least_one_none_zero_digit\n",
- "        cardinal = cardinal.graph_no_exception\n",
- "        delete_virgule = pynutil.delete(\"virgule\")\n",
- "\n",
- "        graph_integer = pynutil.insert(\"integer_part: \\\"\") + cardinal + pynutil.insert(\"\\\" \") + delete_space\n",
- "        graph_integer_or_none = graph_integer | pynutil.insert(\"integer_part: \\\"0\\\" \", weight=.001) # In cases we don't always have an integer preceding\n",
- "\n",
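- "        # one or more cardinals follow the separator; the spaces between them are deleted\n",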
- "        graph_string_of_cardinals = delete_space + cardinal\n",
- "        graph_string_of_cardinals = pynini.closure(graph_string_of_cardinals, 1)\n",
- "        graph_fractional = pynutil.insert(\"fractional_part: \\\"\") + graph_string_of_cardinals + pynutil.insert(\"\\\"\")\n",
- "\n",
- "        graph_decimal_no_sign = graph_integer_or_none + delete_virgule + graph_fractional \n",
- "\n",
- "        graph_negative = pynini.cross(\"moins\", \"negative: \\\"-\\\" \") + delete_space\n",
- "        graph_negative = pynini.closure(graph_negative, 0, 1)\n",
- "        graph_decimal = graph_negative + graph_decimal_no_sign\n",
- "\n",
- "        # Union default decimal with version that accepts quantities\n",
- "        graph_decimal |= graph_negative + get_quantity(\n",
- "            graph_decimal_no_sign, quantities_cardinal\n",
- "        )\n",
- "        final_graph = self.add_tokens(graph_decimal)\n",
- "        self.fst = final_graph.optimize()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "cD-eKqO6qTyh"
- },
- "outputs": [],
- "source": [
- "cardinal = CardinalFst()\n",
- "decimal = DecimalFst(cardinal).fst\n",
- "example = \"trois virgule cent-quatre billion\"\n",
- "apply_fst(example, decimal)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "HiSLKF3RfRZA"
- },
- "source": [
- "## Verbalizer"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "QnkOV5FlteQA"
- },
- "source": [
- "As before, the Verbalizer is responsible for removing the formatting and rendering a given token in conventional form. As the process remains similar to Ordinals and Cardinals (deleting strings in a regular manner), we will instead focus on a unique concern for `DecimalFst`: numeral spacing.\n",
- "\n",
- "For some writing systems, decimal numbers and other strings are typically not written as a single string, instead using punctuation to group numbers for clarity. For example, in the United States, integer digits greater than a thousand are separated by commas for every three digits:\n",
- "- `12345.678` -> `12,345.678`\n",
- "\n",
- "A similar rule occurs in French, save it employs spaces on each side of the decimal marker:\n",
- "- `12345,6789` -> `12 345,678 9`"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "2h4WQZ1a4Cpc"
- },
- "source": [
- "While simple enough, this rule poses a slight complication: it works from the left and right of the decimal separator, whereas WFSTs process linearly from the beginning (or end) of strings. As such we will need to break the formatting rule into two components: one for the integer component and one for the decimal component."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "ViOFNdZw4-qu"
- },
- "source": [
- "Starting with the integer component, we need our subcomponent to recognize every three digits and insert a space before them. We can achieve this with some `graph_utils` helper objects - `NEMO_DIGIT` and `NEMO_NON_BREAKING_SPACE`, which accept all digits and non-breaking spaces, respectively. "
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "Z36be2Vo5VbR"
- },
- "outputs": [],
- "source": [
- "every_three_digits = NEMO_DIGIT ** 3 # accepts a string of three digits\n",
- "space_every_three_integer = pynini.closure(pynutil.insert(NEMO_NON_BREAKING_SPACE) + every_three_digits) # inserts a space before every group of three digits"
- ]
- },
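- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Applied on its own, this subcomponent exposes the problem the next step addresses: the closure inserts a space before *every* group, including the first. (A hedged sketch - the non-breaking space prints like an ordinary space:)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "apply_fst(\"123456\", space_every_three_integer) # expect: ' 123 456' - note the unwanted leading space"
- ]
- },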
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "RSB2gGH-5vwi"
- },
- "source": [
- "However, we cannot let the component insert spaces when there are *only* three digits (e.g. `100`.) As such, we need to anchor the pattern at the beginning of the string: an initial group of one to three digits is accepted as-is, and spaces are only inserted before each complete group of three that follows."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "wfWp3ghH6mDQ"
- },
- "outputs": [],
- "source": [
- "space_every_three_integer = pynini.closure(NEMO_DIGIT, 1, 3) + space_every_three_integer"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "NJrQYSfA6vyu"
- },
- "source": [
- "For the case of the decimal spacing, we simply reverse the logic:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "vBP6ncTp6yXX"
- },
- "outputs": [],
- "source": [
- "space_every_three_decimal = pynini.closure(every_three_digits + pynutil.insert(NEMO_NON_BREAKING_SPACE))\n",
- "space_every_three_decimal = space_every_three_decimal + pynini.closure(NEMO_DIGIT, 1, 3)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "WRXPN_gk69VV"
- },
- "source": [
- "Placed into our Verbalizer, we would see the following:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "h49eztvs7BXH"
- },
- "outputs": [],
- "source": [
- "class DecimalFst(GraphFst):\n",
- "    \"\"\"\n",
- "    Finite state transducer for verbalizing decimal, e.g.\n",
- "    decimal { negative: \"-\" integer_part: \"12\" fractional_part: \"5006\" quantity: \"billion\" } -> -12,500 6 billion\n",
- "    \"\"\"\n",
- "\n",
- "    def __init__(self):\n",
- "        super().__init__(name=\"decimal\", kind=\"verbalize\")\n",
- "\n",
- "        # Need parser to group digits by threes\n",
- "        exactly_three_digits = NEMO_DIGIT ** 3\n",
- "        at_most_three_digits = pynini.closure(NEMO_DIGIT, 1, 3)\n",
- "\n",
- "        space_every_three_integer = (\n",
- "            at_most_three_digits + (pynutil.insert(NEMO_NON_BREAKING_SPACE) + exactly_three_digits).closure()\n",
- "        )\n",
- "        space_every_three_decimal = (\n",
- "            pynini.accep(\",\")\n",
- "            + (exactly_three_digits + pynutil.insert(NEMO_NON_BREAKING_SPACE)).closure()\n",
- "            + at_most_three_digits\n",
- "        )\n",
- "        group_by_threes = space_every_three_integer | space_every_three_decimal\n",
- "        self.group_by_threes = group_by_threes\n",
- "\n",
- "        optional_sign = pynini.closure(pynini.cross(\"negative: \\\"-\\\"\", \"-\") + delete_space, 0, 1)\n",
- "        integer = (\n",
- "            pynutil.delete(\"integer_part:\")\n",
- "            + delete_space\n",
- "            + pynutil.delete(\"\\\"\")\n",
- "            + pynini.closure(NEMO_NOT_QUOTE, 1)\n",
- "            + pynutil.delete(\"\\\"\")\n",
- "        )\n",
- "        integer = integer @ group_by_threes\n",
- "        optional_integer = pynini.closure(integer + delete_space, 0, 1)\n",
- "        fractional = (\n",
- "            pynutil.insert(\",\")\n",
- "            + pynutil.delete(\"fractional_part:\")\n",
- "            + delete_space\n",
- "            + pynutil.delete(\"\\\"\")\n",
- "            + pynini.closure(NEMO_NOT_QUOTE, 1)\n",
- "            + pynutil.delete(\"\\\"\")\n",
- "        )\n",
- "        fractional = fractional @ group_by_threes\n",
- "        optional_fractional = pynini.closure(fractional + delete_space, 0, 1)\n",
- "        quantity = (\n",
- "            pynutil.delete(\"quantity:\")\n",
- "            + delete_space\n",
- "            + pynutil.delete(\"\\\"\")\n",
- "            + pynini.closure(NEMO_NOT_QUOTE, 1)\n",
- "            + pynutil.delete(\"\\\"\")\n",
- "        )\n",
- "        optional_quantity = pynini.closure(pynutil.insert(\" \") + quantity + delete_space, 0, 1)\n",
- "        graph = (optional_integer + optional_fractional + optional_quantity).optimize()\n",
- "        self.numbers = graph # Saving just the part of the graph used for numbers\n",
- "        graph = optional_sign + graph\n",
- "        delete_tokens = self.delete_tokens(graph)\n",
- "        self.fst = delete_tokens.optimize()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Trying out some examples:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "fst = DecimalFst().fst\n",
- "\n",
- "example1 = 'decimal { integer_part: \"3\" fractional_part: \"10453\" quantity: \"billion\" }'\n",
- "example2 = 'decimal { integer_part: \"22323\" fractional_part: \"104553\" }'\n",
- "\n",
- "apply_fst(example1, fst)\n",
- "apply_fst(example2, fst)\n"
- ]
- },
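- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Since we saved the grouping graph as the `group_by_threes` attribute, we can also sketch its behavior in isolation (hedged - the non-breaking spaces print like ordinary spaces):"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "grouping = DecimalFst().group_by_threes\n",
- "apply_fst(\"12345\", grouping) # expect: 12 345"
- ]
- },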
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "CZbshZCW8clI"
- },
- "source": [
- "# Money WFST "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "xuiv8HMz7yjm"
- },
- "source": [
- "Now that we've handled some of the foundational classes, it's time to see how they build up to permit more concrete ones. Let's see how the previous WFSTs assist in building a WFST for normalizing currency: the `MoneyFst`. "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "wTU2c7MtUpqF"
- },
- "source": [
- "## Grammar"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "qqyRm8Ru8TDf"
- },
- "source": [
- "While the exact phrasing will vary, a valid string for currency will possess the following qualities:\n",
- "- A major and/or minor denomination of currency\n",
- "- A numeric quantity of the denomination \n",
- "\n",
- "As our `CardinalFst` and `OrdinalFst` already allow us to normalize the quantity, the only issue for `MoneyFst` is to graph the amounts and build a vocabulary to recognize the denominations.\n",
- "\n",
- "For French, we will use the following examples to build upon:\n",
- "- \"une euro\" -> `1 €`\n",
- "- \"deux euros\" -> `2 €` \n",
- "- \"deux euros cinq\" -> `2,05 €` \n",
- "- \"cinq centimes\" -> `0,05 €`\n",
- "- \"deux billions d'euros\" -> `2 billions d'euros`"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "FMqUir9n9_cA"
- },
- "source": [
- "These suggest the following requirements of our grammar:\n",
- "- There must be a mapping between \"euro\" and \"centime\" and `€` in our vocabulary\n",
- "- This mapping must allow both singular and plural forms\n",
- "- The currency denomination is phrased between the major and minor amounts (\"une euro cinq\" and not \"une cinq euro\")\n",
- "- Large quantities of currency are left 'as is' instead of normalized\n",
- "\n",
- "We may deal with the vocabulary in the typical fashion:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "XN9nbNhB-vEV"
- },
- "outputs": [],
- "source": [
- "major_currency = pynini.string_map([(\"euro\", \"€\")])\n",
- "minor_currency = pynini.string_map([(\"centime\", \"€\")])\n",
- "\n",
- "graph_plural = pynutil.delete(\"s\").ques\n",
- "\n",
- "major_currency += graph_plural\n",
- "minor_currency += graph_plural"
- ]
- },
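- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "A quick hedged check that the optional plural marker is absorbed:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "apply_fst(\"euros\", major_currency) # expect: €\n",
- "apply_fst(\"centime\", minor_currency) # expect: €"
- ]
- },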
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "3aHrm1qPAc-f"
- },
- "source": [
- "Moving to the numbers, note that we need to prepend a leading zero to the value of fractional currency amounts (\"five cents\" -> `$0.05`). We bring back the subgraph from `CardinalFst` that maps tokens to numbers without tokenization to assist with this:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "jwi-yQW1AjvG"
- },
- "outputs": [],
- "source": [
- "from nemo_text_processing.inverse_text_normalization.fr.taggers import cardinal\n",
- "\n",
- "cardinal_graph = cardinal.CardinalFst()\n",
- "graph_cardinal = cardinal_graph.graph_no_exception # graphs cardinals w/o tokenization\n",
- "\n",
- "add_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (pynutil.insert(\"0\") + NEMO_DIGIT)\n",
- "graph_fractional_values = graph_cardinal @ add_leading_zero_to_double_digit"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Now, let us consider how to manage large quantities of currency. In our example (\"deux billions d'euros\" -> `2 billions d'euros`) we see that its behavior mirrors that of our `get_quantity` portion of `DecimalFst`. As such, it would be useful if there was a subcomponent of that graph that we could use here. Like in the case of `CardinalFst`, let us go back and create a subgraph for later use. Since all our quantities are positive, this would be best accomplished right before incorporating the `negative` property, creating a `self.final_graph_wo_negative`:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "class DecimalFst(GraphFst):\n",
- "    def __init__(self, cardinal: GraphFst):\n",
- "        super().__init__(name=\"decimal\", kind=\"classify\")\n",
- "        quantities_cardinal = cardinal.graph_hundreds_component_at_least_one_none_zero_digit\n",
- "        cardinal = cardinal.graph_no_exception\n",
- "        delete_virgule = pynutil.delete(\"virgule\")\n",
- "\n",
- "        graph_integer = pynutil.insert(\"integer_part: \\\"\") + cardinal + pynutil.insert(\"\\\" \") + delete_space\n",
- "        graph_integer_or_none = graph_integer | pynutil.insert(\"integer_part: \\\"0\\\" \", weight=.001) # In cases we don't always have an integer preceding\n",
- "\n",
- "        graph_string_of_cardinals = delete_space + cardinal\n",
- "        graph_string_of_cardinals = pynini.closure(graph_string_of_cardinals, 1)\n",
- "        graph_fractional = pynutil.insert(\"fractional_part: \\\"\") + graph_string_of_cardinals + pynutil.insert(\"\\\"\")\n",
- "\n",
- "        graph_decimal_no_sign = graph_integer_or_none + delete_virgule + graph_fractional \n",
- "\n",
- "        ### NEW GRAPH HERE\n",
- "        self.final_graph_wo_negative = graph_decimal_no_sign | get_quantity(\n",
- "            graph_decimal_no_sign, quantities_cardinal\n",
- "        )\n",
- "\n",
- "        graph_negative = pynini.cross(\"moins\", \"negative: \\\"-\\\" \") + delete_space\n",
- "        graph_negative = pynini.closure(graph_negative, 0, 1)\n",
- "        graph_decimal = graph_negative + graph_decimal_no_sign\n",
- "\n",
- "        # Union default decimal with version that accepts quantities\n",
- "        graph_decimal |= graph_negative + get_quantity(\n",
- "            graph_decimal_no_sign, quantities_cardinal\n",
- "        )\n",
- "        final_graph = self.add_tokens(graph_decimal)\n",
- "        self.fst = final_graph.optimize()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Allowing us to change our grammar to:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from nemo_text_processing.inverse_text_normalization.fr.taggers import cardinal, decimal\n",
- "\n",
- "cardinal_graph = cardinal.CardinalFst()\n",
- "decimal_graph = decimal.DecimalFst(cardinal_graph)\n",
- "\n",
- "graph_cardinal = cardinal_graph.graph_no_exception # graphs cardinals w/o tokenization\n",
- "graph_decimal = decimal_graph.final_graph_wo_negative # graphs positive decimals w/o tokenization\n",
- "\n",
- "add_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (pynutil.insert(\"0\") + NEMO_DIGIT)\n",
- "graph_fractional_values = graph_cardinal @ add_leading_zero_to_double_digit"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "L1RHoW-TLzIz"
- },
- "source": [
- "Note that by doing this, we're also incorporating the formatting from the `decimal` class up to this point. Since these overlap with the `money` class (see next section), we have saved ourselves some work. \n",
- "\n",
- "Since we already made `get_quantity` part of our `DecimalFst`, we can avoid dealing with large quantities now. However, this does mean we still need a way to leave currencies 'as is' without normalization. We can do this by using the `project` method, which will create a WFST that accepts either all valid inputs or all valid outputs of another WFST (depending on argument)."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "7l_TLtJkMluU"
- },
- "outputs": [],
- "source": [
- "major_currency_no_normalize = major_currency.project(\"input\")\n",
- "apply_fst(\"euro\", major_currency_no_normalize)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "raBdHc_WXEpG"
- },
- "source": [
- "We then prepend a WFST that recognizes prepositions commonly used before large values of currency (\"d'\", \"des\"):"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "CEuxiVgDXRBf"
- },
- "outputs": [],
- "source": [
- "graph_preposition = pynini.union(\"des \", \"d'\") # Used for large amounts (billions d'euros)\n",
- "major_currency_no_normalize = pynini.closure(graph_preposition, 0, 1) + major_currency.project(\"input\")"
- ]
- },
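- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "A hedged check that the unnormalized form, with or without its preposition, passes through untouched:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "apply_fst(\"d'euros\", major_currency_no_normalize) # expect: d'euros\n",
- "apply_fst(\"euros\", major_currency_no_normalize) # expect: euros"
- ]
- },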
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "FlXmf8Fq_Rm1"
- },
- "source": [
- "## Classifier"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "T5BBuQRzLuXS"
- },
- "source": [
- "For the Money semiotic class, we have available the following properties for tokenization:\n",
- "- `integer_part`\n",
- "- `fractional_part` \n",
- "- `currency`\n",
- "\n",
- "Laying the initial groundwork seems simple enough. We first instantiate our `MoneyFst` classifier with our initial grammars:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "EZaCeHcFWVP3"
- },
- "outputs": [],
- "source": [
- "class MoneyFst(GraphFst):\n",
- "    def __init__(self, cardinal: GraphFst, decimal: GraphFst):\n",
- "        super().__init__(name=\"money\", kind=\"classify\")\n",
- "        major_currency = pynini.string_map([(\"euro\", \"€\")])\n",
- "        minor_currency = pynini.string_map([(\"centime\", \"€\")])\n",
- "\n",
- "        graph_plural = pynutil.delete(\"s\").ques\n",
- "\n",
- "        major_currency += graph_plural\n",
- "        minor_currency += graph_plural\n",
- "\n",
- "        graph_preposition = pynini.union(\"des \", \"d'\") # Used for large amounts (billions d'euros)\n",
- "        major_currency_no_normalize = pynini.closure(graph_preposition, 0, 1) + major_currency.project(\"input\")\n",
- "\n",
- "        graph_cardinal = cardinal.graph_no_exception\n",
- "        graph_decimal = decimal.final_graph_wo_negative\n",
- "\n",
- "        add_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (pynutil.insert(\"0\") + NEMO_DIGIT)\n",
- "        graph_fractional_values = graph_cardinal @ add_leading_zero_to_double_digit"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "_bpkXroLWaBo"
- },
- "source": [
- "Let us now manage the `currency` property. We have the following scenarios to consider:\n",
- "- Major denomination only\n",
- "- Minor denomination only\n",
- "- Major denomination and implicit minor denomination (\"cinq euro trois\")\n",
- "- Major denomination and explicit minor denomination (\"cinq euros et trois centimes\")\n",
- "- Large quantities of euros (\"cinq billion des euros\")\n",
- "\n",
- "Note how across cases the use of `graph_cardinal` and `graph_decimal` will be applied differently. Further, we may have varying orders in which tags are assigned proper values. For instance, if we have only a minor denomination, we would assign `fractional_part` before `currency`. Meanwhile, a major denomination and implicit minor denomination would be the order of `integer_part`, `currency`, `fractional_part`. While we could try and figure out a way to preserve order, recall that the use of permutations in NeMo ITN makes that unnecessary: we can assume the desired order of tags reaches our Verbalizer without making overt efforts in our Classifier! \n",
- "\n",
- "For example, let's say we need to process \"five dollars\" as `$5.00`. Processed linearly, we could get a token sequence along the lines of: `{ integer_part: \"5\" currency: \"$\" }`. If we passed this token array straight to a Verbalizer, we would need to configure a graph that effectively reverses the order so we could parse the `currency` field prior to the `integer_part` field, perhaps something along the lines of: \n",
- "\n",
- "`pynutil.insert(\"$\") + delete_space + pynutil.delete('integer_part: \\\"') +.... + pynutil.delete('currency: \"$\"')`\n",
- "\n",
- "But since NeMo creates permutations of our Classifier outputs, this is unnecessary. We can simply assume whatever would be the most convenient order for us (e.g. 
`{ currency: \"$\" integer_part: \"5\" }`) and build our Verbalizer around that:\n", - "\n", - "`pynutil.delete('currency: \\\"') + NEMO_SIGMA + pynutil.delete('\\\" integer_part: \\\"') + NEMO_DIGIT +...`\n", - "\n", - "Along with helping to keep our script simpler (we can focus simply on tokenization and not worry about what input order our Verbalizers will accept), this also allows us to overcome structural constraints of WFSTs, namely that they are [limited in reordering text strings](https://en.wikipedia.org/wiki/Pushdown_automaton)." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "fMZ13D2Dh9ZF" - }, - "source": [ - "Keeping this in mind, let's begin mapping the proper tags. Since they're relatively simple, we can start with only major and minor denominations:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "EtwWLp7VbbjM" - }, - "outputs": [], - "source": [ - "graph_integer_component = pynutil.insert(\"integer_part: \\\"\") + graph_cardinal + pynutil.insert(\"\\\"\")\n", - "graph_fractional_component = pynutil.insert(\"fractional_part: \\\"\") + graph_fractional_values + pynutil.insert(\"\\\"\")\n", - "\n", - "graph_major_currency = pynutil.insert(\" currency: \\\"\") + major_currency + pynutil.insert(\"\\\"\")\n", - "graph_minor_currency = pynutil.insert(\" currency: \\\"\") + minor_currency + pynutil.insert(\"\\\"\")\n", - "\n", - "graph_only_major_money = graph_integer_component + delete_space + graph_major_currency\n", - "graph_only_minor_money = graph_fractional_component + delete_space + graph_minor_currency " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "XTmxrK4DmS39" - }, - "source": [ - "Now we may append the case of an implicit `fractional_part` to `graph_only_major_money`" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "Zvzn3pQinkT0" - }, - "outputs": [], - "source": [ - "implicit_fractional_part = delete_space + pynutil.insert(\"fractional_part: \\\"\") + graph_fractional_values + pynutil.insert(\"\\\"\") \n", - "implicit_fractional_part = pynini.closure(implicit_fractional_part, 0, 1) " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "tKFZkCVmn1OX" - }, - "source": [ - "And the explicit fractional portion:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "d_h0pTlMn3jz" - }, - "outputs": [], - "source": [ - "delete_et = pynutil.delete(\"et \") # Sometimes prefaces the minor currency\n", - "delete_et = pynini.closure(delete_et, 0 , 1)\n", - "\n", - "delete_minor = pynutil.delete(minor_currency.project(\"input\")) # to remove the minor currency\n", - "\n", - "explicit_fractional_part = pynutil.insert(\"fractional_part: \\\"\") + graph_fractional_values + pynutil.insert(\"\\\"\") \n", - "explicit_fractional_part = delete_space + delete_et + explicit_fractional_part + delete_space + delete_minor\n", - "explicit_fractional_part = pynini.closure(explicit_fractional_part, 0, 1)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "rvnpAudgo-o3" - }, - "source": [ - "We join them together:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "qYzlIRWTpD8e" - }, - "outputs": [], - "source": [ - "graph_major_money = graph_only_major_money + (implicit_fractional_part | explicit_fractional_part)\n", - "graph_standard_money = graph_major_money | graph_only_minor_money" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "TzeaKXVzpYs8" - }, - "source": [ - 
"Finishing with the case the large quantities of money, we need to use `graph_decimal` so we can exploit its ability to map quantities. Note that since we are using a pre-existing WFST, we can ignore inserting the tags ourselves, since this is already done by the Decimal WFST. As long as we remember to process this aspect with our Verbalizer, we can spare ourselves the extra step." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "LnqX9mGFpmJm" - }, - "outputs": [], - "source": [ - "graph_large_money = pynutil.insert(\" currency: \\\"\") + major_currency_no_normalize + pynutil.insert(\"\\\"\")\n", - "graph_large_money = graph_decimal + delete_space + graph_large_money" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "24TUZnJKqgPA" - }, - "source": [ - "Alltogether, this would give the following Classifier:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "B7-muCO2qizg" - }, - "outputs": [], - "source": [ - "class MoneyFst(GraphFst):\n", - " def __init__(self, cardinal: GraphFst, decimal: GraphFst):\n", - " super().__init__(name=\"money\", kind=\"classify\")\n", - " major_currency = pynini.string_map([(\"euro\", \"€\")])\n", - " minor_currency = pynini.string_map([(\"centime\", \"€\")])\n", - "\n", - " graph_plural = pynutil.delete(\"s\").ques\n", - "\n", - " major_currency += graph_plural\n", - " minor_currency += graph_plural\n", - "\n", - " major_currency_no_normalize = major_currency.project(\"input\")\n", - " graph_preposition = pynini.union(\"des \", \"d'\") # Used for large amounts (billions de euros)\n", - " major_currency_no_normalize = graph_preposition + major_currency.project(\"input\")\n", - "\n", - " graph_cardinal = cardinal.graph_no_exception\n", - " graph_decimal = decimal.final_graph_wo_negative\n", - "\n", - " add_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (pynutil.insert(\"0\") + NEMO_DIGIT)\n", - " graph_fractional_values = graph_cardinal @ add_leading_zero_to_double_digit\n", - "\n", - " graph_integer_component = pynutil.insert(\"integer_part: \\\"\") + graph_cardinal + pynutil.insert(\"\\\"\")\n", - " graph_fractional_component = pynutil.insert(\"fractional_part: \\\"\") + graph_fractional_values + pynutil.insert(\"\\\"\")\n", - "\n", - " graph_major_currency = pynutil.insert(\" currency: \\\"\") + major_currency + pynutil.insert(\"\\\"\")\n", - " graph_minor_currency = pynutil.insert(\" currency: \\\"\") + minor_currency + pynutil.insert(\"\\\"\")\n", - "\n", - " graph_only_major_money = graph_integer_component + delete_space + graph_major_currency\n", - " graph_only_minor_money = graph_fractional_component + delete_space + graph_minor_currency \n", - "\n", - " implicit_fractional_part = delete_space + pynutil.insert(\"fractional_part: \\\"\") + graph_fractional_values + pynutil.insert(\"\\\"\") \n", - " implicit_fractional_part = pynini.closure(implicit_fractional_part, 0, 1) \n", - "\n", - "\n", - " delete_et = pynutil.delete(\"et \") # Sometimes prefaces the minor currency\n", - " delete_et = pynini.closure(delete_et, 0 , 1)\n", - "\n", - " delete_minor = pynutil.delete(minor_currency.project(\"input\")) # to remove the minor currency\n", - "\n", - " explicit_fractional_part = pynutil.insert(\"fractional_part: \\\"\") + graph_fractional_values + pynutil.insert(\"\\\"\") \n", - " explicit_fractional_part = delete_space + delete_et + explicit_fractional_part + delete_space + delete_minor\n", - " explicit_fractional_part = 
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "gxdcyuLmAZZa"
- },
- "source": [
- "## Verbalizer"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "ZZFDWNwY6sOG"
- },
- "source": [
- "By this point, the creation of the Verbalizer should be rather straight-forward - delete the expected tokens and perform any specific formatting that was not caught by the Classifier. \n",
- "\n",
- "In fact, it is so straight-forward that much of the work does not even need to be explicitly managed by the Verbalizer. As mentioned previously, two of the properties we inserted in our Classifier were already referenced in our `DecimalFst` - `integer_part` and `fractional_part`. We even went so far as to directly call a component of `DecimalFst` in our Classifier. As such, outside of the `currency` property, there is little in our Money token that is different from a standard Decimal token. Indeed, even the normalized forms are similar (`200,5` vs. `200,5 €`.) "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "T7sgH0t79tmU"
- },
- "source": [
- "Given these similarities, it seems that we can save ourselves some work and simply use the Decimal Verbalizer to manage much of the normalization. 
Let's look at the basic format of our `MoneyFst` verbalizer, writing it so it accepts a `DecimalFst` as input:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "BEu8nITP9mSG" - }, - "outputs": [], - "source": [ - "class MoneyFst(GraphFst):\n", - " def __init__(self, decimal: GraphFst):\n", - " super().__init__(name=\"money\", kind=\"verbalize\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "JYVLou5N-Dk8" - }, - "source": [ - "We manage the issue of deleting the `currency` property:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "LO35tJ7G-H6N" - }, - "outputs": [], - "source": [ - "class MoneyFst(GraphFst):\n", - " def __init__(self, decimal: GraphFst):\n", - " super().__init__(name=\"money\", kind=\"verbalize\")\n", - " unit = (\n", - " pynutil.delete(\"currency:\")\n", - " + delete_extra_space\n", - " + pynutil.delete(\"\\\"\")\n", - " + pynini.closure(NEMO_NOT_QUOTE, 1)\n", - " + pynutil.delete(\"\\\"\")\n", - " )" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "bDS8XSII-Dpd" - }, - "source": [ - "Now consider, we need to normalize an integer component, a fractional component, and a decimal to separate them. Since NeMo will automatically permutate all tags, we can assume whatever order we want. As such, we can assume we get the exact order that is accepted by our `DecimalFst`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "VtGfpjVA-r3u" - }, - "outputs": [], - "source": [ - " def __init__(self, decimal: GraphFst):\n", - " super().__init__(name=\"money\", kind=\"verbalize\")\n", - " unit = (\n", - " pynutil.delete(\"currency:\")\n", - " + delete_extra_space\n", - " + pynutil.delete(\"\\\"\")\n", - " + pynini.closure(NEMO_NOT_QUOTE, 1)\n", - " + pynutil.delete(\"\\\"\")\n", - " )\n", - " graph = decimal.numbers + delete_space + unit\n", - " delete_tokens = self.delete_tokens(graph)\n", - " self.fst = delete_tokens.optimize()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ZefxZLIU-uRU" - }, - "source": [ - "It is as simple and compact as appending the `unit` component to the preexisting `decimal.numbers`. \n", - "\n", - "This feature is worth keeping in mind as you build up to more concrete classes: the combination of guaranteed tag permutations and prebuilt Verbalizers make the addition of semiotic classes progressively simpler despite the building complexity of your entire grammar." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "WydC7Cn28l5Y" - }, - "source": [ - "# Time WFST " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "VelunbumCJJe" - }, - "source": [ - "Our next composite graph will be for the Time WFST. Here, you may see more variation between your language and our example than with our previous classes. This is for a number of reasons, among them being that while there may be some standard cross linguistic patterns regarding time (e.g. `quantity_of_hours + quantity_of_minutes`), the use of various equivalent phrases can make an exhaustive grammar incredibly specific (e.g. consider managing \"twelve fifteen\", \"twelve and a quarter\", \"quarter past twelve\", \"quarter after twelve\", and \"forty five until one\" all together). 
You may find yourself drawing upon WFSTs that accommodate Cardinals, Fractions, and some basic subtraction.\n",
- "\n",
- "As such, we are going to focus on those aspects of the Time WFST that are necessary for a functional normalization of time-related phrases, saving a more exhaustive grammar for your own specific languages and use cases."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "8wqb28wzATOR"
- },
- "source": [
- "## Grammar"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "AVntDM3AEz0v"
- },
- "source": [
- "For our Time WFST, we will focus on the following aspects:\n",
- "- Use of 24 or 12 hour base\n",
- "- Use of fraction terminology (e.g. \"quarter\" = `15`)\n",
- "- Accommodation of key-words (\"noon\", \"midnight\")\n",
- "- Counting backwards from the hour (\"ten to five\", \"five to three\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "seU9hTbgFgu7"
- },
- "source": [
- "We'll start with the basic system.\n",
- "\n",
- "For French, time operates on a twenty-four hour system, with the zeroth hour being midnight. Time is given in the following format:\n",
- "\n",
- "`cardinal + heure(s) + (cardinal)` \n",
- "\n",
- "This is normalized as:\n",
- "\n",
- "`cardinal h (cardinal)`\n",
- "\n",
- "For instance, for `3:03`, we would have:\n",
- "- input: \"trois heures trois\"\n",
- "- output: `3 h 03`\n",
- "\n",
- "As such, our grammar needs to utilize a Cardinal WFST and have a means to accept \"heures\" from the input. Taking care of the latter case is simple enough:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "HTSVxf4fI_ND"
- },
- "outputs": [],
- "source": [
- "graph_heures = pynini.accep(\"heure\") + pynini.accep(\"s\").ques\n",
- "graph_heures = pynutil.delete(graph_heures)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "6LW7pXaXJSZa"
- },
- "source": [
- "For the cardinals, we could pass an instance of `CardinalFST` to our graph. But do we really need that level of coverage? We only really need to cover the numbers 0 - 60, which we could simply write a new WFST for. Further, it may be beneficial to allow our graph to separate possible ambiguity. While we will not cover it in our tutorial, you may in the future find it necessary to build a WFST for Measurements, in which quantities of time may play a part. Would it not be helpful for your WFST to know that \"thirty hours\" could only ever be a measurement instead of a possible time of day?\n",
- "\n",
- "Given the little amount of effort necessary and the quick benefit, we choose to make our hours and minutes explicit in the Time WFST."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "R4aa06ZPLKIR"
- },
- "outputs": [],
- "source": [
- "hours = pynini.string_map([\n",
- "    (\"zéro\",\"0\"),\n",
- "    (\"une\",\"1\"),\n",
- "    (\"deux\",\"2\"),\n",
- "    (\"trois\",\"3\"),\n",
- "    (\"quatre\",\"4\"),\n",
- "    (\"cinq\",\"5\"),\n",
- "    (\"six\",\"6\"),\n",
- "    (\"sept\",\"7\"),\n",
- "    (\"huit\",\"8\"),\n",
- "    (\"neuf\",\"9\"),\n",
- "    (\"dix\",\"10\"),\n",
- "    (\"onze\",\"11\"),\n",
- "    (\"douze\",\"12\"),\n",
- "    (\"treize\",\"13\"),\n",
- "    (\"quatorze\",\"14\"),\n",
- "    (\"quinze\",\"15\"),\n",
- "    (\"seize\",\"16\"),\n",
- "    (\"dix-sept\",\"17\"),\n",
- "    (\"dix-huit\",\"18\"),\n",
- "    (\"dix-neuf\",\"19\"),\n",
- "    (\"vingt\",\"20\"),\n",
- "    (\"vingt-et-une\",\"21\"),\n",
- "    (\"vingt et une\",\"21\"),\n",
- "    (\"vingt-deux\",\"22\"),\n",
- "    (\"vingt-trois\",\"23\"),\n",
- "    (\"vingt-quatre\",\"24\"),\n",
- "])\n",
- "minutes = pynini.string_map([\n",
- "    (\"une\", \"01\"),\n",
- "    (\"deux\", \"02\"),\n",
- "    (\"trois\", \"03\"),\n",
- "    (\"quatre\", \"04\"),\n",
- "    (\"cinq\", \"05\"),\n",
- "    (\"six\", \"06\"),\n",
- "    (\"sept\", \"07\"),\n",
- "    (\"huit\", \"08\"),\n",
- "    (\"neuf\", \"09\"),\n",
- "    (\"dix\", \"10\"),\n",
- "    (\"onze\", \"11\"),\n",
- "    (\"douze\", \"12\"),\n",
- "    (\"treize\", \"13\"),\n",
- "    (\"quatorze\", \"14\"),\n",
- "    (\"quinze\", \"15\"),\n",
- "    (\"seize\", \"16\"),\n",
- "    (\"dix-sept\", \"17\"),\n",
- "    (\"dix-huit\", \"18\"),\n",
- "    (\"dix-neuf\", \"19\"),\n",
- "    (\"vingt\", \"20\"),\n",
- "    (\"vingt-et-une\", \"21\"),\n",
- "    (\"vingt et une\", \"21\"),\n",
- "    (\"vingt-deux\", \"22\"),\n",
- "    (\"vingt-trois\", \"23\"),\n",
- "    (\"vingt-quatre\", \"24\"),\n",
- "    (\"vingt-cinq\", \"25\"),\n",
- "    (\"vingt-six\", \"26\"),\n",
- "    (\"vingt-sept\", \"27\"),\n",
- "    (\"vingt-huit\", \"28\"),\n",
- "    (\"vingt-neuf\", \"29\"),\n",
- "    (\"trente\", \"30\"),\n",
- "    (\"trente-et-une\", \"31\"),\n",
- "    (\"trente et une\", \"31\"),\n",
- "    (\"trente-deux\", \"32\"),\n",
- "    (\"trente-trois\", \"33\"),\n",
- "    (\"trente-quatre\", \"34\"),\n",
- "    (\"trente-cinq\", \"35\"),\n",
- "    (\"trente-six\", \"36\"),\n",
- "    (\"trente-sept\", \"37\"),\n",
- "    (\"trente-huit\", \"38\"),\n",
- "    (\"trente-neuf\", \"39\"),\n",
- "    (\"quarante\", \"40\"),\n",
- "    (\"quarante-et-une\", \"41\"),\n",
- "    (\"quarante et une\", \"41\"),\n",
- "    (\"quarante-deux\", \"42\"),\n",
- "    (\"quarante-trois\", \"43\"),\n",
- "    (\"quarante-quatre\", \"44\"),\n",
- "    (\"quarante-cinq\", \"45\"),\n",
- "    (\"quarante-six\", \"46\"),\n",
- "    (\"quarante-sept\", \"47\"),\n",
- "    (\"quarante-huit\", \"48\"),\n",
- "    (\"quarante-neuf\", \"49\"),\n",
- "    (\"cinquante\", \"50\"),\n",
- "    (\"cinquante-et-une\", \"51\"),\n",
- "    (\"cinquante et une\", \"51\"),\n",
- "    (\"cinquante-deux\", \"52\"),\n",
- "    (\"cinquante-trois\", \"53\"),\n",
- "    (\"cinquante-quatre\", \"54\"),\n",
- "    (\"cinquante-cinq\", \"55\"),\n",
- "    (\"cinquante-six\", \"56\"),\n",
- "    (\"cinquante-sept\", \"57\"),\n",
- "    (\"cinquante-huit\", \"58\"),\n",
- "    (\"cinquante-neuf\", \"59\"),\n",
- "])"
- ]
- },
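- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "A couple of hedged spot checks on the new maps:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "apply_fst(\"dix-neuf\", hours) # expect: 19\n",
- "apply_fst(\"cinq\", minutes) # expect: 05"
- ]
- },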
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "4SmNsNKLM9cC"
- },
- "source": [
- "Now that we've managed the basic graph, we can address some of the more niche rules of French timekeeping.\n",
- "\n",
- "To start, French employs some colloquialisms that will be familiar to English speakers: minutes that are multiples of fifteen are referred to as fractions of a clock. In particular:\n",
- "- `5 h 15` -> \"cinq heures **et quart**\"\n",
- "- `5 h 30` -> \"cinq heures **et demie**\"\n",
- "- `5 h 45` -> \"cinq heures **et trois quarts**\"\n",
- "\n",
- "We thus need a means of rendering these as their numerical equivalents:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "xHe3nfrpSlrE"
- },
- "outputs": [],
- "source": [
- "# Mapping 'et demi' and 'et quart'\n",
- "graph_et = pynutil.delete(\"et\") + delete_space\n",
- "\n",
- "graph_demi = pynini.accep(\"demi\")\n",
- "graph_demi += pynini.accep(\"e\").ques # people vary on feminine or masculine form\n",
- "graph_demi = pynini.cross(graph_demi, \"30\")\n",
- "\n",
- "graph_quart = pynini.accep('quart')\n",
- "graph_quart = pynini.cross(graph_quart, '15')\n",
- "graph_trois_quart = pynini.cross(\"trois quarts\", \"45\")\n",
- "\n",
- "graph_fractions = graph_demi | graph_quart | graph_trois_quart\n",
- "graph_fractions = graph_et + graph_fractions"
- ]
- },
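- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "A hedged check of the fraction mapping:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "apply_fst(\"et quart\", graph_fractions) # expect: 15\n",
- "apply_fst(\"et demie\", graph_fractions) # expect: 30"
- ]
- },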
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "HD2wobIQS3fX"
- },
- "source": [
- "Also like English, French will use key words to designate a specific timeslot. Noon and midnight are \"midi\" and \"minuit\" respectively."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "ahbkiZFuTN2t"
- },
- "outputs": [],
- "source": [
- "# Midi and minuit\n",
- "graph_midi = pynini.cross(\"midi\", \"12\")\n",
- "graph_minuit = pynini.cross(\"minuit\", \"0\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "6OyMoqfZTX1U"
- },
- "source": [
- "Now it's time to throw a wrench into things: counting backwards from the hour. How are we to get what is essentially a graph to do the subtraction necessary for \"ten to twelve\" to become `11:50`?\n",
- "\n",
- "Easy: we build the subtraction into the graph itself. That is, we map the hours and minutes produced by our graph onto another graph that produces their values shifted back accordingly.\n",
- "\n",
- "Let's take our \"ten to twelve\" example. Normally \"ten\" would map to `10` and \"twelve\" to `12`. But with these new graphs, the detection of the pattern `minute + to + hour` would signal that `10` should now become `50` and `12` become `11`."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "uMWifbm1VQjP"
- },
- "source": [
- "Let us do this for our French example. Luckily enough, there is a clear indication that a French string is counting backwards from the hour: the use of the pattern `cardinal + heures + moins + minutes`"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "c4bV3T1pViCH"
- },
- "outputs": [],
- "source": [
- "hours_to = pynini.string_map([\n",
- "    (\"1\",\"0\"),\n",
- "    (\"2\",\"1\"),\n",
- "    (\"3\",\"2\"),\n",
- "    (\"4\",\"3\"),\n",
- "    (\"5\",\"4\"),\n",
- "    (\"6\",\"5\"),\n",
- "    (\"7\",\"6\"),\n",
- "    (\"8\",\"7\"),\n",
- "    (\"9\",\"8\"),\n",
- "    (\"10\",\"9\"),\n",
- "    (\"11\",\"10\"),\n",
- "    (\"12\",\"11\"),\n",
- "    (\"13\",\"12\"),\n",
- "    (\"14\",\"13\"),\n",
- "    (\"15\",\"14\"),\n",
- "    (\"16\",\"15\"),\n",
- "    (\"17\",\"16\"),\n",
- "    (\"18\",\"17\"),\n",
- "    (\"19\",\"18\"),\n",
- "    (\"20\",\"19\"),\n",
- "    (\"21\",\"20\"),\n",
- "    (\"22\",\"21\"),\n",
- "    (\"23\",\"22\"),\n",
- "    (\"24\",\"23\"),\n",
- "    (\"0\",\"23\"),\n",
- "])\n",
- "minutes_to = pynini.string_map([\n",
- "    (\"59\", \"01\"),\n",
- "    (\"58\", \"02\"),\n",
- "    (\"57\", \"03\"),\n",
- "    (\"56\", \"04\"),\n",
- "    (\"55\", \"05\"),\n",
- "    (\"54\", \"06\"),\n",
- "    (\"53\", \"07\"),\n",
- "    (\"52\", \"08\"),\n",
- "    (\"51\", \"09\"),\n",
- "    (\"50\", \"10\"),\n",
- "    (\"49\", \"11\"),\n",
- "    (\"48\", \"12\"),\n",
- "    (\"47\", \"13\"),\n",
- "    (\"46\", \"14\"),\n",
- "    (\"45\", \"15\"),\n",
- "    (\"44\", \"16\"),\n",
- "    (\"43\", \"17\"),\n",
- "    (\"42\", \"18\"),\n",
- "    (\"41\", \"19\"),\n",
- "    (\"40\", \"20\"),\n",
- "    (\"39\", \"21\"),\n",
- "    (\"38\", \"22\"),\n",
- "    (\"37\", \"23\"),\n",
- "    (\"36\", \"24\"),\n",
- "    (\"35\", \"25\"),\n",
- "    (\"34\", \"26\"),\n",
- "    (\"33\", \"27\"),\n",
- "    (\"32\", \"28\"),\n",
- "    (\"31\", \"29\"),\n",
- "    (\"30\", \"30\"),\n",
- "    (\"29\", \"31\"),\n",
- "    (\"28\", \"32\"),\n",
- "    (\"27\", \"33\"),\n",
- "    (\"26\", \"34\"),\n",
- "    (\"25\", \"35\"),\n",
- "    (\"24\", \"36\"),\n",
- "    (\"23\", \"37\"),\n",
- "    (\"22\", \"38\"),\n",
- "    (\"21\", \"39\"),\n",
- "    (\"20\", \"40\"),\n",
- "    (\"19\", \"41\"),\n",
- "    (\"18\", \"42\"),\n",
- "    (\"17\", \"43\"),\n",
- "    (\"16\", \"44\"),\n",
- "    (\"15\", \"45\"),\n",
- "    (\"14\", \"46\"),\n",
- "    (\"13\", \"47\"),\n",
- "    (\"12\", \"48\"),\n",
- "    (\"11\", \"49\"),\n",
- "    (\"10\", \"50\"),\n",
- "    (\"09\", \"51\"),\n",
- "    (\"08\", \"52\"),\n",
- "    (\"07\", \"53\"),\n",
- "    (\"06\", \"54\"),\n",
- "    (\"05\", \"55\"),\n",
- "    (\"04\", \"56\"),\n",
- "    (\"03\", \"57\"),\n",
- "    (\"02\", \"58\"),\n",
- "    (\"01\", \"59\"),\n",
- "])\n",
- "graph_moins = pynutil.delete(\"moins\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "XOKETkIYZy5M"
- },
- "source": [
- "Why graph the digits instead of the tokens themselves? Along with avoiding some minor repetition and making editing more apparent, it allows this subgraph to be ported to other languages - if so desired.\n",
- "\n",
- "Further, it helps us illustrate a helpful idea within this tutorial: as long as a pattern is regular and/or finite, it is no major issue to accommodate it in our graph, regardless of the mathematical or logical system it employs."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "DJbFiD2fAUc5"
- },
- "source": [
- "## Classifier"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "cK0SGXntaDkI"
- },
- "source": [
- "Once again we place the grammar within the proper child class of `GraphFst`. 
We also insert the proper tags for the `Time` class, which are:\n", - "- `hours`\n", - "- `minutes`\n", - "- `suffix` (explained within this section)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "9Eq5r-_VbBIg" - }, - "outputs": [], - "source": [ - "graph_hours_component = pynini.union(hours, graph_midi, graph_minuit)\n", - "graph_hours_component = pynutil.insert(\"hours: \\\"\") + graph_hours_component + pynutil.insert(\"\\\"\")\n", - "\n", - "graph_minutes_component = (\n", - " pynutil.insert(\" minutes: \\\"\") + pynini.union(minutes, graph_fractions) + pynutil.insert(\"\\\"\")\n", - ") \n", - "graph_minutes_component = delete_space + graph_minutes_component\n", - "\n", - "graph_time_standard = (graph_hours_component + delete_space + graph_heures \n", - " + pynini.closure(graph_minutes_component, 0, 1))" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "2avfS3IacSiC" - }, - "source": [ - "We now setup the alternate graph that allows backwards counting. Note, this is triggered by the occurrence of \"moins\" between the hour and minute component." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "TmpwisOVcn0T" - }, - "outputs": [], - "source": [ - "graph_hours_to_component = hours | graph_midi | graph_minuit\n", - "graph_hours_to_component @= hours_to\n", - "graph_hours_to_component = pynutil.insert(\"hours: \\\"\") + graph_hours_to_component + pynutil.insert(\"\\\"\")\n", - "graph_hours_to_component = graph_hours_to_component + delete_space + graph_heures\n", - "\n", - "graph_minutes_to_component = (minutes | graph_demi | # No 'et' in fractions\n", - " (pynutil.delete(\"le \") + graph_quart) | graph_trois_quart)\n", - "graph_minutes_to_component @= minutes_to\n", - "graph_minutes_to_component = pynutil.insert(\" minutes: \\\"\") + graph_minutes_to_component + pynutil.insert(\"\\\"\")\n", - "\n", - "graph_time_to = graph_hours_to_component + delete_space + graph_moins + delete_space + graph_minutes_to_component" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "FkO4tRRfdQT4" - }, - "source": [ - "We now join it with our main component, allowing us to graph all times:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "0O0vUVizdU8c" - }, - "outputs": [], - "source": [ - "graph_time = graph_time_standard | graph_time_to" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "jbX4JV-LdY3Y" - }, - "source": [ - "Once again we throw a wrench into things with the `suffix` feature. As in the case of Ordinals and Decimals, key-words can play into our Time WFST. For French, this occurs with the words \"du matin\", \"de l'après-midi\", and \"du soir\". (Respectively: \"in the morning\", \"in the afternoon\", and \"in the evening\".) Much like in English, these phrases alter how we write down the time. But instead of indicating `a.m.` or `p.m.`, these indicate *what hour system is used*. For example:\n", - "- \"deux heures du matin\" -> `2 h` = `2:00 a.m.`\n", - "- \"deux heures de l'après-midi\" -> `14 h` = `2:00 p.m.`\n", - "\n", - "Only a twelve hour system is used when these suffixes accompany the time. As such, our Classifier will need to either adjust the times like in the case of counting backwards or must pass the information to the Verbalizer so it can adjust. \n", - "\n", - "Since our Classifier is long enough as is, we will simply store this information in the `suffix` property and allow the Verbalizer to manage." 
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "OqVa78zRgJw9"
- },
- "outputs": [],
- "source": [
- "graph_suffix_am = pynini.cross(\"du matin\", \"am\")\n",
- "graph_suffix_pm = pynini.string_map([(\"de l'après-midi\", \"pm\"),(\"du soir\", \"pm\")])\n",
- "\n",
- "graph_suffix = graph_suffix_am | graph_suffix_pm\n",
- "\n",
- "graph_suffix_component = pynutil.insert(\" suffix: \\\"\") + graph_suffix + pynutil.insert(\"\\\"\")\n",
- "graph_suffix_component = delete_space + graph_suffix_component\n",
- "graph_suffix_component = pynini.closure(graph_suffix_component, 0, 1)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "-LaJMIjUf1XR"
- },
- "source": [
- "And we append to our graph:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "76myCFiggX3E"
- },
- "outputs": [],
- "source": [
- "class TimeFst(GraphFst):\n",
- "    def __init__(self):\n",
- "        super().__init__(name=\"time\", kind=\"classify\")\n",
- "        \"\"\"grammar omitted for length\n",
- "        ....\n",
- "        ....\n",
- "        ....\n",
- "        \"\"\"\n",
- "        graph_hours_component = pynini.union(hours, graph_midi, graph_minuit)\n",
- "        graph_hours_component = pynutil.insert(\"hours: \\\"\") + graph_hours_component + pynutil.insert(\"\\\"\")\n",
- "\n",
- "        graph_minutes_component = (\n",
- "            pynutil.insert(\" minutes: \\\"\") + pynini.union(minutes, graph_fractions) + pynutil.insert(\"\\\"\")\n",
- "        ) \n",
- "        graph_minutes_component = delete_space + graph_minutes_component\n",
- "\n",
- "        graph_time_standard = (graph_hours_component + delete_space + graph_heures \n",
- "                               + pynini.closure(graph_minutes_component, 0, 1))\n",
- "\n",
- "        graph_hours_to_component = hours | graph_midi | graph_minuit\n",
- "        graph_hours_to_component @= hours_to\n",
- "        graph_hours_to_component = pynutil.insert(\"hours: \\\"\") + graph_hours_to_component + pynutil.insert(\"\\\"\")\n",
- "        graph_hours_to_component = graph_hours_to_component + delete_space + graph_heures\n",
- "\n",
- "        graph_minutes_to_component = (minutes | graph_demi | # No 'et' in fractions\n",
- "                                      (pynutil.delete(\"le \") + graph_quart) | graph_trois_quart)\n",
- "        graph_minutes_to_component @= minutes_to\n",
- "        graph_minutes_to_component = pynutil.insert(\" minutes: \\\"\") + graph_minutes_to_component + pynutil.insert(\"\\\"\")\n",
- "\n",
- "        graph_time_to = graph_hours_to_component + delete_space + graph_moins + delete_space + graph_minutes_to_component\n",
- "\n",
- "        graph_time_no_suffix = graph_time_standard | graph_time_to\n",
- "\n",
- "        graph_suffix_am = pynini.cross(\"du matin\", \"am\")\n",
- "        graph_suffix_pm = pynini.string_map([(\"de l'après-midi\", \"pm\"),(\"du soir\", \"pm\")])\n",
- "\n",
- "        graph_suffix = graph_suffix_am | graph_suffix_pm\n",
- "\n",
- "        graph_suffix_component = pynutil.insert(\" suffix: \\\"\") + graph_suffix + pynutil.insert(\"\\\"\")\n",
- "        graph_suffix_component = delete_space + graph_suffix_component\n",
- "        graph_suffix_component = pynini.closure(graph_suffix_component, 0, 1)\n",
- "        \n",
- "        final_graph = graph_time_no_suffix + graph_suffix_component\n",
- "\n",
- "        final_graph = self.add_tokens(final_graph)\n",
- "\n",
- "        self.fst = final_graph.optimize()\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Let's see how we did:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "time = TimeFst().fst\n",
- "example = \"quatre heures moins cinq\"\n",
- "apply_fst(example, time)"
- ]
- },
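- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "And one more hedged spot check, this time with a fraction term:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "example = \"cinq heures et demie\"\n",
- "apply_fst(example, time) # expect something like: time { hours: \"5\" minutes: \"30\" }"
- ]
- },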
- "example = \"quatre heures moins cinq\"\n",
- "apply_fst(example, time)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "lPlJ1qyeAWOL"
- },
- "source": [
- "## Verbalizer"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "CrO-xtJ87PEl"
- },
- "source": [
- "The initial part of the Verbalizer should appear familiar. We delete the property tags `hours` and `minutes`, making sure to preserve the actual values for formatting."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "fCzZKR7ek0Mz"
- },
- "outputs": [],
- "source": [
- "hour = (\n",
- "    pynutil.delete(\"hours:\")\n",
- "    + delete_space\n",
- "    + pynutil.delete(\"\\\"\")\n",
- "    + pynini.closure(NEMO_DIGIT, 1, 2)\n",
- "    + pynutil.delete(\"\\\"\")\n",
- ")\n",
- "minute = (\n",
- "    pynutil.delete(\"minutes:\")\n",
- "    + delete_extra_space\n",
- "    + pynutil.delete(\"\\\"\")\n",
- "    + pynini.closure(NEMO_DIGIT, 1, 2)\n",
- "    + pynutil.delete(\"\\\"\")\n",
- ")\n",
- "graph = hour + delete_extra_space + pynutil.insert(\"h\") + minute.ques"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "WnVV9GUKk-b7"
- },
- "source": [
- "We then deal with the case of `suffix`. We first note that if the suffix is for a morning time (before noon), then no further conversion is needed. We may simply delete the property and its value."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "haOEiSbglc6s"
- },
- "outputs": [],
- "source": [
- "day_suffixes = pynutil.delete(\"suffix: \\\"am\\\"\")\n",
- "\n",
- "graph = hour + delete_extra_space + pynutil.insert(\"h\") + minute.ques + delete_space + day_suffixes.ques"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "wL0FNg6Xlhb-"
- },
- "source": [
- "Meanwhile, the post-noon suffixes require us to shift the hours value by twelve. Much like in the case of counting backwards from the hour, we can simply create a WFST to do this addition work for us."
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "YLrabUNplwG7" - }, - "outputs": [], - "source": [ - "hour_to_night = pynini.string_map([\n", - " (\"1\", \"13\"),\n", - " (\"2\", \"14\"),\n", - " (\"3\", \"15\"),\n", - " (\"4\", \"16\"),\n", - " (\"5\", \"17\"),\n", - " (\"6\", \"18\"),\n", - " (\"7\", \"19\"),\n", - " (\"8\", \"20\"),\n", - " (\"9\", \"21\"),\n", - " (\"10\", \"22\"),\n", - " (\"11\", \"23\"), # Note that 12 and 24 would be phrased \"midi\" and \"minuit\" respectively\n", - "])" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "X0-z-qJAmIiI" - }, - "source": [ - "We then create an alternate graph where this conversion is mapped onto the hours function - given a post-noon suffix - and create a union with our earlier graph:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "8CdEmo9NmN7u" - }, - "outputs": [], - "source": [ - "night_suffixes = pynutil.delete(\"suffix: \\\"pm\\\"\")\n", - "graph |= (\n", - " hour @ hour_to_night\n", - " + delete_extra_space\n", - " + pynutil.insert(\"h\")\n", - " + minute.ques\n", - " + delete_space\n", - " + night_suffixes\n", - " )" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "YnoIkZBqmaTo" - }, - "source": [ - "Giving us a final Verbalizer of:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "ZfXimvFBmdDD" - }, - "outputs": [], - "source": [ - "class TimeFst(GraphFst):\n", - " def __init__(self):\n", - " super().__init__(name=\"time\", kind=\"verbalize\")\n", - "\n", - " hour_to_night = pynini.string_map([\n", - " (\"1\", \"13\"),\n", - " (\"2\", \"14\"),\n", - " (\"3\", \"15\"),\n", - " (\"4\", \"16\"),\n", - " (\"5\", \"17\"),\n", - " (\"6\", \"18\"),\n", - " (\"7\", \"19\"),\n", - " (\"8\", \"20\"),\n", - " (\"9\", \"21\"),\n", - " (\"10\", \"22\"),\n", - " (\"11\", \"23\"),\n", - "])\n", - "\n", - " day_suffixes = pynutil.delete(\"suffix: \\\"am\\\"\")\n", - " night_suffixes = pynutil.delete(\"suffix: \\\"pm\\\"\")\n", - "\n", - " hour = (\n", - " pynutil.delete(\"hours:\")\n", - " + delete_space\n", - " + pynutil.delete(\"\\\"\")\n", - " + pynini.closure(NEMO_DIGIT, 1, 2)\n", - " + pynutil.delete(\"\\\"\")\n", - " )\n", - " minute = (\n", - " pynutil.delete(\"minutes:\")\n", - " + delete_extra_space\n", - " + pynutil.delete(\"\\\"\")\n", - " + pynini.closure(NEMO_DIGIT, 1, 2)\n", - " + pynutil.delete(\"\\\"\")\n", - " )\n", - "\n", - " graph = hour + delete_extra_space + pynutil.insert(\"h\") + minute.ques + delete_space + day_suffixes.ques\n", - "\n", - " graph |= (\n", - " hour @ hour_to_night\n", - " + delete_extra_space\n", - " + pynutil.insert(\"h\")\n", - " + minute.ques\n", - " + delete_space\n", - " + night_suffixes\n", - " )\n", - " delete_tokens = self.delete_tokens(graph)\n", - " self.fst = delete_tokens.optimize()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "e5tPcCaSYuhY" - }, - "source": [ - "If you've noticed, the Verbalizer process has become simpler as we've progressed through our WFSTs. Commonly, you will seldom need to even provide the amount of overhead we've seen in `TimeFst`, `MoneyFst`, and `OrdinalFst`, and the majority of this component is simply removing tokens as an intermediary step, as we'll see for our Name class." 
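- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Before moving on, we can sanity-check the Verbalizer on a hand-written tag string. This is a minimal sketch: the tag string below is a hypothetical example of what our Classifier emits, so adjust it if your Classifier's output differs."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "time_verbalizer = TimeFst().fst  # the Verbalizer class defined above\n",
- "tagged = 'time { hours: \"2\" minutes: \"30\" suffix: \"pm\" }'  # hypothetical tag string\n",
- "apply_fst(tagged, time_verbalizer)  # we would expect something like: 14 h 30"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "With the Time WFST behaving as expected, we can turn to tokens that resist categorization altogether."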
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "iHmRe3UIhyIH"
- },
- "source": [
- "# WhiteList WFST "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "8kMn2qB9bVFy"
- },
- "source": [
- "\n",
- "While developing your grammars, you may encounter tokens that refuse standard categorization and yet still require normalization. For example, you may need to render \"Mister Brown\" as `Mr. Brown` or \"H M S Nelson\" as `H.M.S. Nelson`. As these cases are rather specific, they lack a regular pattern that would fit a specific classifier. (What about \"mister\" as a token requires normalization, as opposed to \"Brown\"?) Instead, we need to explicitly list their input-output mappings (i.e. a whitelist).\n",
- "\n",
- "For NeMo, this is performed through the `WhiteListFst`:\n",
- "\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "6B4oPXYcccWs"
- },
- "source": [
- "## Grammar"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "RThTLUCRceOO"
- },
- "source": [
- "`WhiteListFst` is essentially just a wrapper for a `string_map` or `string_file` mapping with the appropriate formatting for deployment. Per our example, we can make a graph with the following:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "eIOOb_wJdMMx"
- },
- "outputs": [],
- "source": [
- "graph = pynini.string_map([\n",
- "    (\"mister\", \"mr.\"),\n",
- "    (\"h m s\", \"h.m.s.\"),\n",
- "    (\"doctor\", \"dr.\")\n",
- "])"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "O5kTXwmPZ9Tt"
- },
- "source": [
- "As previously mentioned, here is where the use of `string_file` will make maintenance much easier. Discovering whitelist mappings is an iterative process and you will more than likely need to return to your list throughout development. For instance, it may be obvious to include tokens such as \"madame\", \"miss\", and \"esquire\", but would you think of providing abbreviations for \"the right honorable\" or \"tennessee valley authority\"? Keeping a TSV file available for quick insertions greatly assists here."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "RC5Cf-Z8dYVk"
- },
- "source": [
- "## Classifier"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "144nvAHEdfBJ"
- },
- "source": [
- "Unlike our other WFSTs, there is no specific semiotic class for `WhiteListFst`. It instead falls under the default Name class, designating that there is no need for further processing beyond obligatory tokenization. Indeed, we can simply insert the token ourselves instead of calling `add_tokens`."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "oPkrmg2gdznd"
- },
- "outputs": [],
- "source": [
- "class WhiteListFst(GraphFst):\n",
- "    def __init__(self):\n",
- "        super().__init__(name=\"whitelist\", kind=\"classify\")\n",
- "\n",
- "        whitelist = pynini.string_map([\n",
- "            (\"mister\", \"mr.\"),\n",
- "            (\"h m s\", \"h.m.s.\"),\n",
- "            (\"doctor\", \"dr.\")])\n",
- "        graph = pynutil.insert(\"name: \\\"\") + convert_space(whitelist) + pynutil.insert(\"\\\"\")\n",
- "        self.fst = graph.optimize()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "B05kdSIdd2dv"
- },
- "source": [
- "## Verbalizer"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Since the whitelisted token has already been rendered in the desired normalized form, all that is necessary is to strip the `name` token and render the string 'as is'. 
This can be done through the following:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "gaq3voIYiUCA"
- },
- "outputs": [],
- "source": [
- "class WhiteListFst(GraphFst):\n",
- "    def __init__(self):\n",
- "        super().__init__(name=\"whitelist\", kind=\"verbalize\")\n",
- "        graph = (\n",
- "            pynutil.delete(\"name:\")\n",
- "            + delete_space\n",
- "            + pynutil.delete(\"\\\"\")\n",
- "            + pynini.closure(NEMO_CHAR - \" \", 1)\n",
- "            + pynutil.delete(\"\\\"\")\n",
- "        )\n",
- "        graph = graph @ pynini.cdrewrite(pynini.cross(u\"\u00A0\", \" \"), \"\", \"\", NEMO_SIGMA)  # Converts non-breaking spaces back to regular spaces\n",
- "        self.fst = graph.optimize()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "cUE7Gg35bWKb"
- },
- "source": [
- "While the graph is largely self-explanatory, take note that the default implementation assumes a character string without spacing. If you intend to include additional formatting in your normalization (e.g. `H. M. S.` instead of `H.M.S.`), you may need to adjust the graph to expand coverage."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "_o_a15Fg7niv"
- },
- "source": [
- "# Word and Punctuation WFST "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "Zi6lP7mTmnUV"
- },
- "source": [
- "Continuing with the Name class, we will conclude with the Word and Punctuation WFSTs. These are among the simplest and most crucial classes of the entire ITN system, as they classify all tokens that are not caught by other semiotic classes. Since these other tokens make up the majority of all strings your normalization system will encounter, they are essential for general functionality.\n",
- "\n",
- "However, they escape discussion as their function is self-evident: since they function as default classes, tokens only reach the Word WFST and Punctuation WFST if they have not been accepted by the other WFSTs. As such, we can simply accept the tokens as they are, providing them a `name` tag."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "9zCqczLqp5NW"
- },
- "source": [
- "## Classifier"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "eUWum5U0p99c"
- },
- "source": [
- "For instance, consider the `WordFst` Classifier in its entirety:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "CCZSTeDHofDl"
- },
- "outputs": [],
- "source": [
- "class WordFst(GraphFst):\n",
- "    def __init__(self):\n",
- "        super().__init__(name=\"word\", kind=\"classify\")\n",
- "        word = pynutil.insert(\"name: \\\"\") + pynini.closure(NEMO_NOT_SPACE, 1) + pynutil.insert(\"\\\"\")\n",
- "        self.fst = word.optimize()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "9ys2VpjjoiEC"
- },
- "source": [
- "It just processes the entire token string with the `NEMO_NOT_SPACE` utility WFST (which accepts any string that is not a space). For your language, you may simply reuse one of the preexisting `WordFst` grammars.\n",
- "\n",
- "Depending on the language, the `PunctuationFst` may require some (minimal) adjustment. 
Note the following:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "Mnnd3PVMpF4t"
- },
- "outputs": [],
- "source": [
- "class PunctuationFst(GraphFst):\n",
- "    def __init__(self):\n",
- "        super().__init__(name=\"punctuation\", kind=\"classify\")\n",
- "\n",
- "        s = \"!#$%&\\'()*+,-./:;<=>?@^_`{|}~\"\n",
- "        punct = pynini.union(*s)\n",
- "\n",
- "        graph = pynutil.insert(\"name: \\\"\") + punct + pynutil.insert(\"\\\"\")\n",
- "\n",
- "        self.fst = graph.optimize()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "_afW02LXpLtz"
- },
- "source": [
- "If your language uses punctuation other than that in the `s` string (or reserves some of these punctuation marks as regular characters), you may simply edit `s` to accommodate. \n",
- "\n",
- "For instance, French has a unique quotation style that utilizes guillemets \"« »\". We may add their Unicode codepoints (to avoid encoding issues) to `s`:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "mgfZIKzVplVm"
- },
- "outputs": [],
- "source": [
- "class PunctuationFst(GraphFst):\n",
- "    def __init__(self):\n",
- "        super().__init__(name=\"punctuation\", kind=\"classify\")\n",
- "\n",
- "        s = \"!#$%&\\'()*+,-./:;<=>?@^_`{|}~\"\n",
- "        guillemets = \"\\u00AB\" + \"\\u00BB\"  # quotation marks in French.\n",
- "        s += guillemets\n",
- "        punct = pynini.union(*s)\n",
- "\n",
- "        graph = pynutil.insert(\"name: \\\"\") + punct + pynutil.insert(\"\\\"\")\n",
- "\n",
- "        self.fst = graph.optimize()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "6Upb5-wcp_7H"
- },
- "source": [
- "## Verbalizer"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "ufWT1T6GqCCT"
- },
- "source": [
- "Note that `PunctuationFst` and `WordFst` both encode with the `name` property. This leaves no differentiation between the two for a Verbalizer. This makes sense, as there are no particular formatting rules for them; they simply need a placeholder tag to avoid alteration between the Classifier and Verbalizer steps. Once passed to the Verbalizer, they are rendered as normal by simply removing the tag (this is practically identical to the `WhiteListFst`):"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "LqyhqQKZqcph"
- },
- "outputs": [],
- "source": [
- "class WordFst(GraphFst):\n",
- "    def __init__(self):\n",
- "        super().__init__(name=\"word\", kind=\"verbalize\")\n",
- "        chars = pynini.closure(NEMO_CHAR - \" \", 1)\n",
- "        char = pynutil.delete(\"name:\") + delete_space + pynutil.delete(\"\\\"\") + chars + pynutil.delete(\"\\\"\")\n",
- "        graph = char @ pynini.cdrewrite(pynini.cross(u\"\u00A0\", \" \"), \"\", \"\", NEMO_SIGMA)  # Converts non-breaking spaces back to regular spaces\n",
- "\n",
- "        self.fst = graph.optimize()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "lGbrUkcpapyi"
- },
- "source": [
- "For many languages, the writing of your `WordFst` and `PunctuationFst` (both Classifiers and Verbalizers) will require no more than duplicating the preexisting grammars found in NeMo Text Processing."
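- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "As a quick sanity check, we can pass a stray guillemet through the Classifier defined above (a minimal sketch; it assumes the `apply_fst` helper and the French `PunctuationFst` from the previous cell):"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "punct = PunctuationFst().fst\n",
- "apply_fst(\"«\", punct)  # we would expect a tag along the lines of: name: \"«\""
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "With the default classes in place, a few of the remaining semiotic classes deserve comment before we assemble the full grammar."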
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "5y9jhkhQ7p4W"
- },
- "source": [
- "# Other Classes "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "j1mgnISmiu-g"
- },
- "source": [
- "While the preceding discussion should be suitable for development of the remaining classes, some helpful notes may be of use before continuing:\n",
- "- Fraction WFST: This is the last of the 'fundamental' classes and should take priority after completion of the Decimal WFST. It operates very similarly to the Ordinal WFST in that you wish to recover the Cardinal roots for the numerator and denominator prior to tagging. Its properties are: `negative`, `integer_part`, `numerator`, and `denominator`.\n",
- "- Measure WFST: Like the Money WFST, this will require management of several 'parent' WFSTs (Fraction, Cardinal, Decimal) to be suitably comprehensive. As well, you may find it more productive to find ways to compose new measurement units instead of simply listing all (e.g. micrometers, petameters, miles per hour, feet per second). Its properties are: `negative` and `units`, and it allows subgraphs of the `cardinal`, `decimal`, and `fraction` classes. (That is, it allows tokenization within the tokenization.)\n",
- "- Date WFST: Depending on writing conventions, this may vary in complexity. For instance, English speakers may write dates as `01/01/2021` or `Jan. 1 2021`. Are there specific use cases where one is preferred, or should you simply decide on a format? Further, you may wish to take advantage of the `preserve_order` property to avoid possible unwanted verbalizations (some implementations will permit both `Jan. 1` and `1 Jan.` if you are not careful.) Its properties are: `month`, `day`, and `year`. \n",
- "- Telephone WFST: These will be heavily dependent not only on writing conventions but even regional preference. For instance, the U.S. commonly uses a ten-digit system broken into the following sequence: `###-###-####`. Meanwhile, mainland France breaks a ten-digit sequence into groups of two: `##-##-##-##-##`. Take careful note of how your language's target region verbalizes these figures and leave room for some variation in development. The `telephone` class has only one property: `number_part`. \n",
- "- Electronic WFST: For normalizing email addresses or urls, you will need to develop for the `electronic` class. The main concerns will be managing alphanumeric strings and parsing the reserved symbols used for protocols and domains. (How does your target language pronounce \"https://\", \"www\", '.', or '@'?) A tiny illustrative sketch of such symbol mappings follows this list. Depending on whether you are normalizing a url or email, the following properties will be needed:\n",
- "  - email: `username`, `domain`\n",
- "  - url: `protocol` (Sparrowhawk allows further detail here but NeMo passes the entire url through the `protocol` property)"
- ]
- },
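- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "For illustration, here is a minimal sketch of the kind of mapping an Electronic WFST might start from. The spoken forms below are assumptions chosen for demonstration, not NeMo's actual grammar:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# Hypothetical starting point for an Electronic WFST (illustrative only):\n",
- "graph_symbols = pynini.string_map([\n",
- "    (\"arobase\", \"@\"),  # French reading of the at sign (assumption)\n",
- "    (\"point\", \".\"),  # \"dot\"\n",
- "    (\"tiret\", \"-\"),  # \"dash\"\n",
- "])"
- ]
- },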
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "-i25X8mK90n3"
- },
- "source": [
- "# Tokenize and Classify "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "v4bcigU6b9ss"
- },
- "source": [
- "We are now ready to build a general Classifier for our entire language. Upon completion of your grammars, the next step is to unite them in a general Classifier WFST (preferably located within a `tokenize_and_classify.py` file). This WFST will be responsible for determining the appropriate semiotic class for each token in your string and processing the necessary properties for normalization.\n",
- "\n",
- "For this section, we will focus on the following: grammar composition, assignment of weights, and importing/exporting as a FAR file. Since we will need to work with some instantiated graphs, let's preload them before proceeding. (Note the compilation time.)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from nemo_text_processing.inverse_text_normalization.fr.taggers.cardinal import CardinalFst\n",
- "from nemo_text_processing.inverse_text_normalization.fr.taggers.decimal import DecimalFst\n",
- "from nemo_text_processing.inverse_text_normalization.fr.taggers.money import MoneyFst\n",
- "from nemo_text_processing.inverse_text_normalization.fr.taggers.ordinal import OrdinalFst\n",
- "from nemo_text_processing.inverse_text_normalization.fr.taggers.punctuation import PunctuationFst\n",
- "from nemo_text_processing.inverse_text_normalization.fr.taggers.time import TimeFst\n",
- "from nemo_text_processing.inverse_text_normalization.fr.taggers.whitelist import WhiteListFst\n",
- "from nemo_text_processing.inverse_text_normalization.fr.taggers.word import WordFst\n",
- "\n",
- "cardinal = CardinalFst()\n",
- "cardinal_graph = cardinal.fst\n",
- "\n",
- "ordinal = OrdinalFst(cardinal)\n",
- "ordinal_graph = ordinal.fst\n",
- "\n",
- "decimal = DecimalFst(cardinal)\n",
- "decimal_graph = decimal.fst\n",
- "\n",
- "whitelist_graph = WhiteListFst().fst\n",
- "word_graph = WordFst().fst\n",
- "time_graph = TimeFst().fst\n",
- "money_graph = MoneyFst(cardinal, decimal).fst\n",
- "punct_graph = PunctuationFst().fst"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "MIv58eSocOV1"
- },
- "source": [
- "## Grammar"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "k_RPlnfVdG5E"
- },
- "source": [
- "As for all previous grammars, the `tokenize_and_classify` grammar inherits from `GraphFst` as its own class: `ClassifyFst`. "
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "WHKG4c2WdW0G"
- },
- "outputs": [],
- "source": [
- "class ClassifyFst(GraphFst):\n",
- "    def __init__(self):\n",
- "        super().__init__(name=\"tokenize_and_classify\", kind=\"classify\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "j9_I6DJmdcOG"
- },
- "source": [
- "This class is responsible for instantiating all subgraphs and passing necessary dependencies:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "4YtmcxLOdlas"
- },
- "outputs": [],
- "source": [
- "class ClassifyFst(GraphFst):\n",
- "    def __init__(self):\n",
- "        super().__init__(name=\"tokenize_and_classify\", kind=\"classify\")\n",
- "\n",
- "        cardinal = CardinalFst()\n",
- "        cardinal_graph = cardinal.fst\n",
- "\n",
- "        ordinal = OrdinalFst(cardinal)\n",
- "        ordinal_graph = ordinal.fst\n",
- "\n",
- "        decimal = DecimalFst(cardinal)\n",
- "        decimal_graph = decimal.fst\n",
- "\n",
- "        whitelist_graph = WhiteListFst().fst\n",
- "        word_graph = WordFst().fst\n",
- "        time_graph = TimeFst().fst\n",
- "        money_graph = MoneyFst(cardinal, decimal).fst\n",
- "        punct_graph = PunctuationFst().fst"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "y5vGvv3HeAY9"
- },
- "source": [
- "We then join all the grammars together so `ClassifyFst` can apply them. 
Rather unceremoniously, this is accomplished by performing a union across all grammars (excluding `PunctuationFst`, to assist tokenization). We then follow this union by inserting the `tokens` class around the resulting formatting (required for processing):"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "oocgPQ5geZJO"
- },
- "outputs": [],
- "source": [
- "class ClassifyFst(GraphFst):\n",
- "    def __init__(self):\n",
- "        super().__init__(name=\"tokenize_and_classify\", kind=\"classify\")\n",
- "\n",
- "        cardinal = CardinalFst()\n",
- "        cardinal_graph = cardinal.fst\n",
- "\n",
- "        ordinal = OrdinalFst(cardinal)\n",
- "        ordinal_graph = ordinal.fst\n",
- "\n",
- "        decimal = DecimalFst(cardinal)\n",
- "        decimal_graph = decimal.fst\n",
- "\n",
- "        whitelist_graph = WhiteListFst().fst\n",
- "        word_graph = WordFst().fst\n",
- "        time_graph = TimeFst().fst\n",
- "        money_graph = MoneyFst(cardinal, decimal).fst\n",
- "        punct_graph = PunctuationFst().fst\n",
- "\n",
- "        classify = (\n",
- "            time_graph\n",
- "            | whitelist_graph\n",
- "            | decimal_graph\n",
- "            | cardinal_graph\n",
- "            | ordinal_graph\n",
- "            | money_graph\n",
- "            | word_graph\n",
- "        )\n",
- "        token = pynutil.insert(\"tokens { \") + classify + pynutil.insert(\" }\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "ASWDXWQjfLEU"
- },
- "source": [
- "Our graph is now able to process an individual token. But what about a string? Here you will need to be mindful of the tokenization behavior for your language and decide on your desired treatment of punctuation (hence its exclusion from the main graph). \n",
- "\n",
- "For our purposes, we will assume the convention of space and punctuation serving as token separators. We graph punctuation as individual tokens:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "r6WztK2jwhFt"
- },
- "outputs": [],
- "source": [
- "punct_graph = PunctuationFst().fst\n",
- "punct = pynutil.insert(\"tokens { \") + punct_graph + pynutil.insert(\" }\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "9T2rT89jw3T1"
- },
- "source": [
- "and join the `punct` graph with our `tokens` graph (inserting spaces between tokens for formatting):"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "rGtVOK-txKOP"
- },
- "outputs": [],
- "source": [
- "token = \"PLACEHOLDER\"  # stand-in for the token graph defined above\n",
- "token_plus_punct = (\n",
- "    pynini.closure(punct + pynutil.insert(\" \")) + token + pynini.closure(pynutil.insert(\" \") + punct)\n",
- ")  # Note the use of closure in case there are multiple punctuation marks\n",
- "graph = token_plus_punct + pynini.closure(delete_extra_space + token_plus_punct)\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "_gixfQ69xWPe"
- },
- "source": [
- "and then address spacing between tokens: \n",
- "\n",
- "`graph = delete_space + graph + delete_space`"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "DWnmazWecyUG"
- },
- "source": [
- "## Weighting "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "egHbwIbMx-hT"
- },
- "source": [
- "Were we to leave our `ClassifyFst` like this, we would undoubtedly encounter a mountain of errors. What will stop our graph from treating punctuation that is part of a previous grammar as a token separator (e.g. \"vingt-et-un\")? 
How do we ensure that a currency string isn't treated as solely a decimal string with a `name` token following?\n",
- "\n",
- "As in previous cases, the solution lies in our choice of weights for the grammar."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "y3U7_M8CyxZ1"
- },
- "source": [
- "Let us return to the main graph:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "9VXe1dfsy3Be"
- },
- "outputs": [],
- "source": [
- "classify = (\n",
- "    time_graph\n",
- "    | whitelist_graph\n",
- "    | decimal_graph\n",
- "    | cardinal_graph\n",
- "    | ordinal_graph\n",
- "    | money_graph\n",
- "    | word_graph\n",
- ")\n",
- "punct = pynutil.insert(\"tokens { \") + punct_graph + pynutil.insert(\" }\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "aY4vOFqxy5ua"
- },
- "source": [
- "Beyond the path weights that we explicitly added, these graphs are currently weightless. Since we want the graphs themselves to be the general determiners of a path, let us use some default weights an order of magnitude beyond our path weights (we use `pynutil.add_weight`):"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "bthyt_Le2rsA"
- },
- "outputs": [],
- "source": [
- "classify = (\n",
- "    pynutil.add_weight(time_graph, 1)\n",
- "    | pynutil.add_weight(whitelist_graph, 1)\n",
- "    | pynutil.add_weight(decimal_graph, 1)\n",
- "    | pynutil.add_weight(cardinal_graph, 1)\n",
- "    | pynutil.add_weight(ordinal_graph, 1)\n",
- "    | pynutil.add_weight(money_graph, 1)\n",
- "    | pynutil.add_weight(word_graph, 1)\n",
- ")\n",
- "punct = pynutil.insert(\"tokens { \") + pynutil.add_weight(punct_graph, 1) + pynutil.insert(\" }\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "xMNIJbzj3MMP"
- },
- "source": [
- "Let's see what logical adjustments should be made. First off, we know that we want each class token to span the largest string possible. (e.g. We don't want \"quatre-vingt\" to be rendered as two `cardinal` classes with a hyphen in between.) As such, we want to penalize our graph for using more than one token. We can do so by establishing the following constraint: the combined weight of any two tokens must exceed the weight of any single token. That is, for any pair of token weights `w_1` and `w_2` and any individual token weight `w`, we need some constant `k` with:\n",
- "\n",
- "`w_1 + w_2 > k >= w`\n",
- "\n",
- "To keep things simple, let us make the upper limit `2`. This means we should increase all the weights to keep our constraint (with weights of `1.1`, for example, any two tokens cost `2.2 > 2`, while no single token costs more than `2`):\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "classify = (\n",
- "    pynutil.add_weight(time_graph, 1.1)\n",
- "    | pynutil.add_weight(whitelist_graph, 1.1)\n",
- "    | pynutil.add_weight(decimal_graph, 1.1)\n",
- "    | pynutil.add_weight(cardinal_graph, 1.1)\n",
- "    | pynutil.add_weight(ordinal_graph, 1.1)\n",
- "    | pynutil.add_weight(money_graph, 1.1)\n",
- "    | pynutil.add_weight(word_graph, 1.1)\n",
- ")\n",
- "punct = pynutil.insert(\"tokens { \") + pynutil.add_weight(punct_graph, 1.1) + pynutil.insert(\" }\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Do we want this constraint to include all tokens? Imagine if we had a string of multiple semiotic tokens in a row. Since this string's combined weight would be larger than any single class token, a grammar that serves as a universal acceptor (i.e. 
`word_graph`) would be preferred over these individual classes. This would obviously be incorrect. As such, we want to make sure that `word_graph` is only traversed when there is truly no other option:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "qc_CU2ro63eg"
- },
- "outputs": [],
- "source": [
- "classify = (\n",
- "    pynutil.add_weight(time_graph, 1.1)\n",
- "    | pynutil.add_weight(whitelist_graph, 1.1)\n",
- "    | pynutil.add_weight(decimal_graph, 1.1)\n",
- "    | pynutil.add_weight(cardinal_graph, 1.1)\n",
- "    | pynutil.add_weight(ordinal_graph, 1.1)\n",
- "    | pynutil.add_weight(money_graph, 1.1)\n",
- "    | pynutil.add_weight(word_graph, 100)\n",
- ")\n",
- "punct = pynutil.insert(\"tokens { \") + pynutil.add_weight(punct_graph, 1.1) + pynutil.insert(\" }\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Now, even with a string of fifty different class tokens, `word_graph` would still not be considered as a path to traverse."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "fW8C3vD-7Dbl"
- },
- "source": [
- "Next, let us consider our foundational graph: `cardinal_graph`. As Cardinals occur in practically all our WFSTs, it's possible for `cardinal_graph` to apply in almost all cases. Yet, we've specifically invoked `CardinalFst` when it was required in any of the other classes, so it will never be needed in any of those cases. This means that we want all those graphs to have *priority* over `cardinal_graph`. As such, we will increase its weight so it takes second-lowest precedence (while still paying attention to the combined weight constraint)."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "97UwGaEn8pj7"
- },
- "outputs": [],
- "source": [
- "classify = (\n",
- "    pynutil.add_weight(time_graph, 1.1)\n",
- "    | pynutil.add_weight(whitelist_graph, 1.1)\n",
- "    | pynutil.add_weight(decimal_graph, 1.1)\n",
- "    | pynutil.add_weight(cardinal_graph, 1.2)\n",
- "    | pynutil.add_weight(ordinal_graph, 1.1)\n",
- "    | pynutil.add_weight(money_graph, 1.1)\n",
- "    | pynutil.add_weight(word_graph, 100)\n",
- ")\n",
- "punct = pynutil.insert(\"tokens { \") + pynutil.add_weight(punct_graph, 1.1) + pynutil.insert(\" }\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "0d9Lw4Ot88_B"
- },
- "source": [
- "This form of thinking can be applied to all the 'foundational' graphs you may develop: the dependent graphs should take higher precedence than the graphs they borrow from. For instance, since `money_graph` utilizes `decimal_graph`, we know it should take precedence. However, since `decimal_graph` borrows from `cardinal_graph`, its weight must still be less than `1.2`."
- ]
- },
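- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Before applying this, it may help to watch weights steer path selection on a toy example. This is a minimal sketch: the strings and weights are arbitrary, and we assume the `apply_fst` helper from earlier (which returns the shortest, i.e. lowest-weight, path):"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# Two competing mappings for the same input, distinguished only by weight:\n",
- "cheap = pynutil.add_weight(pynini.cross(\"deux\", \"2\"), 1.1)\n",
- "costly = pynutil.add_weight(pynini.cross(\"deux\", \"two\"), 1.2)\n",
- "toy = cheap | costly\n",
- "apply_fst(\"deux\", toy)  # the weight-1.1 path wins, returning: 2"
- ]
- },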
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "As such:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "-wF8cgLK9tpU"
- },
- "outputs": [],
- "source": [
- "classify = (\n",
- "    pynutil.add_weight(time_graph, 1)\n",
- "    | pynutil.add_weight(whitelist_graph, 1)\n",
- "    | pynutil.add_weight(decimal_graph, 1.1)\n",
- "    | pynutil.add_weight(cardinal_graph, 1.2)\n",
- "    | pynutil.add_weight(ordinal_graph, 1)\n",
- "    | pynutil.add_weight(money_graph, 1.09)\n",
- "    | pynutil.add_weight(word_graph, 100)\n",
- ")\n",
- "punct = pynutil.insert(\"tokens { \") + pynutil.add_weight(punct_graph, 1) + pynutil.insert(\" }\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "huMzDoZ2-FD2"
- },
- "source": [
- "For the classes that don't seem affected, we can set their weights to match the graphs below their 'foundation' graphs, simply to prevent prioritization when it is not required.\n",
- "\n",
- "Meanwhile, `whitelist_graph` should take precedence over all others, as it may contain unique normalizations that would otherwise be accidentally caught by the other graphs."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "gWG6ttyd-bbD"
- },
- "outputs": [],
- "source": [
- "classify = (\n",
- "    pynutil.add_weight(time_graph, 1.1)\n",
- "    | pynutil.add_weight(whitelist_graph, 1.07)\n",
- "    | pynutil.add_weight(decimal_graph, 1.1)\n",
- "    | pynutil.add_weight(cardinal_graph, 1.2)\n",
- "    | pynutil.add_weight(ordinal_graph, 1.1)\n",
- "    | pynutil.add_weight(money_graph, 1.08)\n",
- "    | pynutil.add_weight(word_graph, 100)\n",
- ")\n",
- "punct = pynutil.insert(\"tokens { \") + pynutil.add_weight(punct_graph, 1.1) + pynutil.insert(\" }\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "1TH08f8O-fWx"
- },
- "source": [
- "Keep in mind that building weights in this manner is hardly a rule for grammar development and is instead intended as a means to initialize weights for empirical development. You will find that actual strings will cause unexpected behavior that requires fine-tuning. \n",
- "\n",
- "For instance, the Classifier for French in NeMo ITN benefits from having varying precedence for some weights, as seen in the following excerpt:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "gKdkyDK3_r46"
- },
- "outputs": [],
- "source": [
- "class ClassifyFst(GraphFst):\n",
- "    \"\"\"\n",
- "    Final class that composes all other classification grammars. This class can process an entire sentence that is lower-cased.\n",
- "    For deployment, this grammar will be compiled and exported to an OpenFst Finite State Archive (FAR) file. \n",
- "    More details on deployment at NeMo/tools/text_processing_deployment.\n",
- "\n",
- "    Args:\n",
- "        cache_dir: path to a dir with .far grammar file.
Set to None to avoid using cache.\n", - " overwrite_cache: set to True to overwrite .far files\n", - " \"\"\"\n", - "\n", - " def __init__(self, cache_dir: str = None, overwrite_cache: bool = False):\n", - " super().__init__(name=\"tokenize_and_classify\", kind=\"classify\")\n", - "\n", - " far_file = None\n", - " if cache_dir is not None and cache_dir != \"None\":\n", - " os.makedirs(cache_dir, exist_ok=True)\n", - " far_file = os.path.join(cache_dir, \"_fr_itn.far\")\n", - " if not overwrite_cache and far_file and os.path.exists(far_file):\n", - " self.fst = pynini.Far(far_file, mode=\"r\")[\"tokenize_and_classify\"]\n", - " logging.info(f\"ClassifyFst.fst was restored from {far_file}.\")\n", - " else:\n", - " logging.info(f\"Creating ClassifyFst grammars.\")\n", - "\n", - " cardinal = CardinalFst()\n", - " cardinal_graph = cardinal.fst\n", - "\n", - " fraction = FractionFst(cardinal)\n", - " fraction_graph = fraction.fst\n", - "\n", - " ordinal = OrdinalFst(cardinal)\n", - " ordinal_graph = ordinal.fst\n", - "\n", - " decimal = DecimalFst(cardinal)\n", - " decimal_graph = decimal.fst\n", - "\n", - " measure_graph = MeasureFst(cardinal=cardinal, decimal=decimal, fraction=fraction).fst\n", - " date_graph = DateFst(cardinal).fst\n", - " word_graph = WordFst().fst\n", - " time_graph = TimeFst().fst\n", - " money_graph = MoneyFst(cardinal, decimal).fst\n", - " whitelist_graph = WhiteListFst().fst\n", - " punct_graph = PunctuationFst().fst\n", - " electronic_graph = ElectronicFst().fst\n", - " telephone_graph = TelephoneFst().fst\n", - "\n", - " classify = (\n", - " pynutil.add_weight(whitelist_graph, 1.01)\n", - " | pynutil.add_weight(time_graph, 1.05)\n", - " | pynutil.add_weight(date_graph, 1.09)\n", - " | pynutil.add_weight(decimal_graph, 1.08)\n", - " | pynutil.add_weight(measure_graph, 1.1)\n", - " | pynutil.add_weight(cardinal_graph, 1.1)\n", - " | pynutil.add_weight(ordinal_graph, 1.1)\n", - " | pynutil.add_weight(fraction_graph, 1.09)\n", - " | pynutil.add_weight(money_graph, 1.07)\n", - " | pynutil.add_weight(telephone_graph, 1.1)\n", - " | pynutil.add_weight(electronic_graph, 1.1)\n", - " | pynutil.add_weight(word_graph, 100)\n", - " )\n", - "\n", - " punct = pynutil.insert(\"tokens { \") + pynutil.add_weight(punct_graph, weight=1.1) + pynutil.insert(\" }\")\n", - " token = pynutil.insert(\"tokens { \") + classify + pynutil.insert(\" }\")\n", - " token_plus_punct = (\n", - " pynini.closure(punct + pynutil.insert(\" \")) + token + pynini.closure(pynutil.insert(\" \") + punct)\n", - " )\n", - "\n", - " graph = token_plus_punct + pynini.closure(delete_extra_space + token_plus_punct)\n", - " graph = delete_space + graph + delete_space\n", - "\n", - " self.fst = graph.optimize()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "qc4B_0rNcQZu" - }, - "source": [ - "## FAR import/export" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "0nRRPvy-AYsA" - }, - "source": [ - "While working through these code excerpts, you may have noticed some latency with each instantiation of our WFSTs (notably wherever `CardinalFst` was involved). This is because the `pynini.optimize` that we call with each graph's instantiation is computationally expensive. 
For our ultimate purpose of deployment, it seems a waste of resources to recreate stable graphs for each use.\n",
- "\n",
- "To address this, NeMo ITN supports WFST caching through use of `pynini.Far`, storing and recovering Classify grammars as FARs (FST archives).\n",
- "\n",
- "Let us update our `ClassifyFst` to permit passing a cache directory and to allow overwriting (for development):"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "5XgWevUzD1AE"
- },
- "outputs": [],
- "source": [
- "class ClassifyFst(GraphFst):\n",
- "    def __init__(self, cache_dir: str = None, overwrite_cache: bool = False):\n",
- "        super().__init__(name=\"tokenize_and_classify\", kind=\"classify\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "l28GMR70ESz0"
- },
- "source": [
- "For storing our graphs as FARs, we can use `graph_utils.generator_main`, which saves our WFSTs by type for easier management. As arguments, it takes a file name and a dict mapping WFST type to graph:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "AzTkcmAWFLYm"
- },
- "outputs": [],
- "source": [
- "import os\n",
- "\n",
- "class ClassifyFst(GraphFst):\n",
- "    def __init__(self, cache_dir: str = None, overwrite_cache: bool = False):\n",
- "        super().__init__(name=\"tokenize_and_classify\", kind=\"classify\")\n",
- "        # Grammar here\n",
- "        # ....\n",
- "        if cache_dir is not None and cache_dir != \"None\":\n",
- "            os.makedirs(cache_dir, exist_ok=True)\n",
- "            far_file = os.path.join(cache_dir, \"_fr_itn.far\")\n",
- "            generator_main(far_file, {\"tokenize_and_classify\": self.fst})"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "Wz8wjCQSD6eJ"
- },
- "source": [
- "We pair this with the ability to load from cache (note the `\"tokenize_and_classify\"` key being passed, and that the cache path must be resolved before we check for an existing FAR):"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "FRFYgMmuD_53"
- },
- "outputs": [],
- "source": [
- "import os\n",
- "\n",
- "class ClassifyFst(GraphFst):\n",
- "    def __init__(self, cache_dir: str = None, overwrite_cache: bool = False):\n",
- "        super().__init__(name=\"tokenize_and_classify\", kind=\"classify\")\n",
- "        far_file = None  # Resolve the cache path first so the check below is well-defined\n",
- "        if cache_dir is not None and cache_dir != \"None\":\n",
- "            os.makedirs(cache_dir, exist_ok=True)\n",
- "            far_file = os.path.join(cache_dir, \"_fr_itn.far\")\n",
- "        if not overwrite_cache and far_file and os.path.exists(far_file):\n",
- "            self.fst = pynini.Far(far_file, mode=\"r\")[\"tokenize_and_classify\"]\n",
- "        else:\n",
- "            # Grammar here\n",
- "            # ....\n",
- "            if far_file:\n",
- "                generator_main(far_file, {\"tokenize_and_classify\": self.fst})\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "ib9nggZxF38s"
- },
- "source": [
- "Producing our `ClassifyFst` as:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "d2BZyx6sGGg2"
- },
- "outputs": [],
- "source": [
- "class ClassifyFst(GraphFst):\n",
- "    def __init__(self, cache_dir: str = None, overwrite_cache: bool = False):\n",
- "        super().__init__(name=\"tokenize_and_classify\", kind=\"classify\")\n",
- "\n",
- "        far_file = None\n",
- "        if cache_dir is not None and cache_dir != \"None\":\n",
- "            os.makedirs(cache_dir, exist_ok=True)\n",
- "            far_file = os.path.join(cache_dir, \"_fr_itn.far\")\n",
- "        if not overwrite_cache and far_file and os.path.exists(far_file):\n",
- "            self.fst = pynini.Far(far_file, mode=\"r\")[\"tokenize_and_classify\"]\n",
- "        else:\n",
- "            cardinal = CardinalFst()\n",
- "            cardinal_graph = cardinal.fst\n",
- "\n",
- "            ordinal = OrdinalFst(cardinal)\n",
- "            ordinal_graph = ordinal.fst\n",
- "\n",
- "            decimal = DecimalFst(cardinal)\n",
- "            decimal_graph = decimal.fst\n",
- "\n",
- "            whitelist_graph = WhiteListFst().fst\n",
- "            word_graph = WordFst().fst\n",
- "            time_graph = TimeFst().fst\n",
- "            money_graph = MoneyFst(cardinal, decimal).fst\n",
- "            punct_graph = PunctuationFst().fst\n",
- "\n",
- "            classify = (\n",
- "                pynutil.add_weight(time_graph, 1.1)\n",
- "                | pynutil.add_weight(whitelist_graph, 1.01)\n",
- "                | pynutil.add_weight(decimal_graph, 1.09)\n",
- "                | pynutil.add_weight(cardinal_graph, 1.1)\n",
- "                | pynutil.add_weight(ordinal_graph, 1.09)\n",
- "                | pynutil.add_weight(money_graph, 1.08)\n",
- "                | pynutil.add_weight(word_graph, 100)\n",
- "            )\n",
- "\n",
- "            punct = pynutil.insert(\"tokens { \") + pynutil.add_weight(punct_graph, weight=1.1) + pynutil.insert(\" }\")\n",
- "            token = pynutil.insert(\"tokens { \") + classify + pynutil.insert(\" }\")\n",
- "            token_plus_punct = (\n",
- "                pynini.closure(punct + pynutil.insert(\" \")) + token + pynini.closure(pynutil.insert(\" \") + punct)\n",
- "            )\n",
- "\n",
- "            graph = token_plus_punct + pynini.closure(delete_extra_space + token_plus_punct)\n",
- "            graph = delete_space + graph + delete_space\n",
- "\n",
- "            self.fst = graph.optimize()\n",
- "\n",
- "            if far_file:\n",
- "                generator_main(far_file, {\"tokenize_and_classify\": self.fst})"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "nEhY6wKKtfhn"
- },
- "source": [
- "You should find that caching vastly speeds up compilation time."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "rTtCnC5w95CI"
- },
- "source": [
- "# Verbalize and Verbalize Final "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "H9y5yuk1HaGj"
- },
- "source": [
- "Our last step is to create a universal Verbalizer for all classes. This is very similar to development of `ClassifyFst`, except that the Verbalizer breaks its normalization task into two components:\n",
- "- `VerbalizeFst`, which removes formatting for each token\n",
- "- `VerbalizeFinalFst`, which extends `VerbalizeFst` across all tokens in a string\n",
- "\n",
- "Why two components when `tokenize_and_classify` was one? Because Sparrowhawk performs all the functionality of `VerbalizeFinalFst`, so its inclusion would break deployment. However, without it, your NeMo grammar would be unable to function on its own. So we separate the two to allow the best of both worlds."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "vUawTJVuH8iR"
- },
- "source": [
- "## VerbalizeFst"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "xghiBV06IIWU"
- },
- "source": [
- "Much like `ClassifyFst`, `VerbalizeFst` instantiates all its subgraphs and then joins them together under a union operation. However, it does not need to employ weighting. Why? Because `ClassifyFst` has assigned each token a specific class. 
As each class is unique, there is no possibility that a subgraph will be employed for the wrong token.\n", - "\n", - "As such, our `VerbalizeFst` is formed by a simple union operation across all previous Verbalizer graphs:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "uMVCqCvsIt2v" - }, - "outputs": [], - "source": [ - "from nemo_text_processing.inverse_text_normalization.fr.verbalizers.cardinal import CardinalFst\n", - "from nemo_text_processing.inverse_text_normalization.fr.verbalizers.decimal import DecimalFst\n", - "from nemo_text_processing.inverse_text_normalization.fr.verbalizers.money import MoneyFst\n", - "from nemo_text_processing.inverse_text_normalization.fr.verbalizers.ordinal import OrdinalFst\n", - "from nemo_text_processing.inverse_text_normalization.fr.verbalizers.time import TimeFst\n", - "from nemo_text_processing.inverse_text_normalization.fr.verbalizers.whitelist import WhiteListFst\n", - "from nemo_text_processing.inverse_text_normalization.fr.verbalizers.word import WordFst\n", - "\n", - "class VerbalizeFst(GraphFst):\n", - " def __init__(self):\n", - " super().__init__(name=\"verbalize\", kind=\"verbalize\")\n", - " cardinal = CardinalFst()\n", - " cardinal_graph = cardinal.fst\n", - " ordinal_graph = OrdinalFst().fst\n", - " decimal = DecimalFst()\n", - " decimal_graph = decimal.fst\n", - " whitelist_graph = WhiteListFst().fst\n", - " money_graph = MoneyFst(decimal=decimal).fst\n", - " time_graph = TimeFst().fst\n", - " graph = (\n", - " time_graph\n", - " | whitelist_graph\n", - " | money_graph\n", - " | ordinal_graph\n", - " | decimal_graph\n", - " | cardinal_graph\n", - " )\n", - " self.fst = graph" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Wap-LU6EI2Iu" - }, - "source": [ - "## Verbalize Final" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "TYaEt_0tI47t" - }, - "source": [ - "With `VerbalizeFst` complete, we now extend our graph to cover any series of tokens. All this requires is deletion of the `tokens` formatting (note the absence of such in our previous graph) and use of closure for any series of one or more tokens.\n", - "\n", - "This provides the following graph:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "L-9lJNE6JPCW" - }, - "outputs": [], - "source": [ - "\n", - "class VerbalizeFinalFst(GraphFst):\n", - " def __init__(self):\n", - " super().__init__(name=\"verbalize_final\", kind=\"verbalize\")\n", - " verbalize = VerbalizeFst().fst\n", - " word = WordFst().fst\n", - " types = verbalize | word\n", - " graph = (\n", - " pynutil.delete(\"tokens\")\n", - " + delete_space\n", - " + pynutil.delete(\"{\")\n", - " + delete_space\n", - " + types\n", - " + delete_space\n", - " + pynutil.delete(\"}\")\n", - " )\n", - " graph = delete_space + pynini.closure(graph + delete_extra_space) + graph + delete_space\n", - " self.fst = graph" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "WwMKFw-QJVgm" - }, - "source": [ - "Unlike `ClassifyFst`, NeMo ITN does not cache `VerbalizeFst` or `VerbalizeFinalFst`. 
(While you are welcome to provide such functionality in your own development, keep in mind that the limited complexity of our Verbalizers makes compilation times less significant.)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "7U21AZearZMK"
- },
- "source": [
- "# Deployment "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "VrSccoh9K6JK"
- },
- "source": [
- "Now that we have done all the groundwork, we can finally move to deployment. This final section will just cover the minor code alterations required to call your language through NeMo ITN and deploy through Sparrowhawk. For further information on using NeMo ITN, please see [this tutorial](https://colab.research.google.com/github/NVIDIA/NeMo/blob/stable/tutorials/text_processing/Inverse_Text_Normalization.ipynb). "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "0Le2aJvFIAKd"
- },
- "source": [
- "## InverseNormalize"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "r2R3TUCDLi5-"
- },
- "source": [
- "NeMo calls upon the `InverseNormalizer` class for all ITN tasks. Given a string and a language, it will instantiate the `ClassifyFst` and `VerbalizeFinalFst` for the given language. (Sparrowhawk deployment, by contrast, does not use `VerbalizeFinalFst`, as its functions are managed by Sparrowhawk itself.) To make your language deployable in the general NeMo ITN system, you must designate the availability of these classes for instantiation. (For more information, see the [source code](https://github.com/NVIDIA/NeMo/blob/main/nemo_text_processing/inverse_text_normalization/inverse_normalize.py).)\n",
- "\n",
- "To do so requires only two changes. The first is providing a string to identify your language as an option for `parse_args` ([ISO codes are advised](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes)):"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "tfv4Ee3ML-Fg"
- },
- "outputs": [],
- "source": [
- "def parse_args():\n",
- "    parser = ArgumentParser()\n",
- "    parser.add_argument(\"input_string\", help=\"input string\", type=str)\n",
- "    parser.add_argument(\"--language\", help=\"language\", choices=['en', 'de', 'es', 'ru', 'fr', 'MY_LANGUAGE'], default=\"en\", type=str)\n",
- "    parser.add_argument(\"--verbose\", help=\"print info for debugging\", action='store_true')\n",
- "    parser.add_argument(\"--overwrite_cache\", help=\"set to True to re-create .far grammar files\", action=\"store_true\")\n",
- "    parser.add_argument(\n",
- "        \"--cache_dir\",\n",
- "        help=\"path to a dir with .far grammar file. Set to None to avoid using cache\",\n",
- "        default=None,\n",
- "        type=str,\n",
- "    )\n",
- "    return parser.parse_args()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "awVl5nAsMUTl"
- },
- "source": [
- "The next is to import your `ClassifyFst` and `VerbalizeFinalFst` in `__init__`:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "class InverseNormalizer(Normalizer):\n",
- "    def __init__(self, lang: str = 'en', cache_dir: str = None, overwrite_cache: bool = False):\n",
- "\n",
- "        if lang == 'en':\n",
- "            from nemo_text_processing.inverse_text_normalization.en.taggers.tokenize_and_classify import ClassifyFst\n",
- "            from nemo_text_processing.inverse_text_normalization.en.verbalizers.verbalize_final import (\n",
- "                VerbalizeFinalFst,\n",
- "            )\n",
- "        # Other languages\n",
- "        # ....\n",
- "        elif lang == 'MY_LANGUAGE':\n",
- "            from nemo_text_processing.inverse_text_normalization.MY_LANGUAGE.taggers.tokenize_and_classify import ClassifyFst\n",
- "            from nemo_text_processing.inverse_text_normalization.MY_LANGUAGE.verbalizers.verbalize_final import (\n",
- "                VerbalizeFinalFst,\n",
- "            )"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "TI1PuejLMxdI"
- },
- "source": [
- "And you're done! NeMo will handle the rest. "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "xrksINQoICfj"
- },
- "source": [
- "## Sparrowhawk"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "rP9-dmMJSg3h"
- },
- "source": [
- "Sparrowhawk is an open-source implementation of Google's Kestrel Text Normalization system. Functionally, it operates similarly to NeMo ITN (the two-step Classify and Verbalize functions stem from [intentional NeMo integration](https://arxiv.org/pdf/2104.05055.pdf)), but it is better optimized for backend deployment. \n",
- "\n",
- "Like the preceding section, this portion of the tutorial will highlight a few necessary edits so you may deploy your normalization system."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "u1eGMGxkVZmM"
- },
- "source": [
- "### Grammar Export"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "v9dr0E-uVgoT"
- },
- "source": [
- "The first step in deploying your grammar is exporting both the `ClassifyFst` and `VerbalizeFst` WFSTs as FAR files. This is done through `pynini_export.py`, found in `NeMo/tools/text_processing_deployment`. 
To allow export of your grammar, we must make edits similar to those we made for `inverse_normalize.py`."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "qtek2bMMWbMj"
- },
- "source": [
- "First, append your language to the list of accepted strings in `parse_args`:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "5pTGX9YAWiTZ"
- },
- "outputs": [],
- "source": [
- "\n",
- "def parse_args():\n",
- "    parser = ArgumentParser()\n",
- "    parser.add_argument(\"--output_dir\", help=\"output directory for grammars\", required=True, type=str)\n",
- "    parser.add_argument(\"--language\", help=\"language\", choices=[\"en\", \"de\", \"es\", \"ru\", 'fr', 'MY_LANGUAGE'], type=str, default='en')\n",
- "    parser.add_argument(\n",
- "        \"--grammars\", help=\"grammars to be exported\", choices=[\"tn_grammars\", \"itn_grammars\"], type=str, required=True\n",
- "    )\n",
- "    parser.add_argument(\n",
- "        \"--input_case\", help=\"input capitalization\", choices=[\"lower_cased\", \"cased\"], default=\"cased\", type=str\n",
- "    )\n",
- "    parser.add_argument(\"--overwrite_cache\", help=\"set to True to re-create .far grammar files\", action=\"store_true\")\n",
- "    parser.add_argument(\n",
- "        \"--cache_dir\",\n",
- "        help=\"path to a dir with .far grammar file. Set to None to avoid using cache\",\n",
- "        default=None,\n",
- "        type=str,\n",
- "    )\n",
- "    return parser.parse_args()\n"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "Fm3CTmdLWlUt"
- },
- "source": [
- "Then import your `ClassifyFst` and `VerbalizeFst` in `main` (recall that Sparrowhawk handles the final verbalization step itself):"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "LANG=\"FOO\"\n",
- "\n",
- "if LANG == 'en':\n",
- "    from nemo_text_processing.inverse_text_normalization.en.taggers.tokenize_and_classify import (\n",
- "        ClassifyFst as ITNClassifyFst,\n",
- "    )\n",
- "    from nemo_text_processing.inverse_text_normalization.en.verbalizers.verbalize import (\n",
- "        VerbalizeFst as ITNVerbalizeFst,\n",
- "    )\n",
- "# Other languages\n",
- "# ...\n",
- "elif LANG == 'MY_LANGUAGE':\n",
- "    from nemo_text_processing.inverse_text_normalization.MY_LANGUAGE.taggers.tokenize_and_classify import (\n",
- "        ClassifyFst as ITNClassifyFst,\n",
- "    )\n",
- "    from nemo_text_processing.inverse_text_normalization.MY_LANGUAGE.verbalizers.verbalize import (\n",
- "        VerbalizeFst as ITNVerbalizeFst,\n",
- "    )"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "JFgGhCMMW3UQ"
- },
- "source": [
- "### Deployment"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "V8RH0aGbW41U"
- },
- "source": [
- "By default, NeMo ITN is structured to allow deployment through a Docker-based backend. This involves building a container from file, exporting your grammars to the container, and then deploying Sparrowhawk for processing.\n",
- "\n",
- "NeMo automates this entire process through `export_grammars.sh`, which will automatically compile your grammars for deployment (assuming you edited `pynini_export` appropriately) and mount them in a container for you. 
For our purposes, `export_grammars.sh` only requires the following arguments:\n",
- "- `LANGUAGE` - the string you have used throughout to indicate your language\n",
- "- `GRAMMARS` - only accepts `itn_grammars` (Inverse Text Normalization) or `tn_grammars` (Text Normalization)\n",
- "\n",
- "For instance, we would call our French ITN with:"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "KYdbawAfZIco"
- },
- "source": [
- "`bash export_grammars.sh --GRAMMARS=itn_grammars --LANGUAGE=fr`"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "UXVr2twdZMO2"
- },
- "source": [
- "This will return the Docker prompt for further normalization."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "TDoVUxCE-Dax"
- },
- "source": [
- "# Final Notes"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "Fw-9mU7ql8iY"
- },
- "source": [
- "Congratulations, you have now constructed an entire ITN system from the ground up! While your experience will vary with each language, you will find several commonalities that will assist you in further development. \n",
- "\n",
- "If you are interested in working further with your language WFSTs, you may wish to construct a TN system. Broadly, this is accomplished by inverting your previous graphs (`pynini.invert` may assist here) and changing your outputs to avoid indeterminacy (i.e. decide on one canonical output for your grammar for each class). But outside of such grammar-specific edits, you repeat many of the steps exhibited here, such as:\n",
- "- Use of a two-step classifier-verbalizer system\n",
- "- Same semiotic classes for tagging\n",
- "- Inheritance of `GraphFst`\n",
- "- Minor import edits to `pynini_export` and `export_grammars.sh`"
- ]
- }
- ],
- "metadata": {
- "colab": {
- "collapsed_sections": [],
- "name": "WFST Tutorial.ipynb",
- "provenance": [],
- "toc_visible": true
- },
- "interpreter": {
- "hash": "fbc643a332f9d7801191710b24a8a955d342df4f32791f7fb65121dc4784751f"
- },
- "kernelspec": {
- "display_name": "Python 3.9.7 64-bit ('wfst_tutorial': conda)",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.9.7"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 1
-}
\ No newline at end of file
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "Qq1Hz6CKWdwl",
+ "outputId": "3d8f5bd6-f10e-431d-9039-eb88164fbb95"
+ },
+ "outputs": [],
+ "source": [
+ "\"\"\"\n",
+ "You can run either this notebook locally or on Google Colab.\n",
+ "\n",
+ "Instructions for setting up Colab are as follows:\n",
+ "1. Open a new Python 3 notebook.\n",
+ "2. Import this notebook from GitHub (File -> Upload Notebook -> \"GITHUB\" tab -> copy/paste GitHub URL)\n",
+ "3. Optional: Restart the runtime (Runtime -> Restart Runtime) for any upgraded packages to take effect\n",
+ "\"\"\""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "\n",
+ "## Prerequisites:\n",
+ "1. 
Please make sure to read the [Text Processing Documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/nlp/text_normalization/wfst/intro.html) and [Text Normalization Introduction Tutorial](https://colab.research.google.com/github/NVIDIA/NeMo/blob/stable/tutorials/text_processing/Text_(Inverse)_Normalization.ipynb) **before** this notebook. This notebook is an in-depth tutorial on how to customize and develop your own text normalization or inverse text normalization grammars.\n",
+ "2. Download the NeMo source code."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "## Install NeMo, which installs both the nemo and nemo_text_processing packages\n",
+ "BRANCH = 'r1.9.0'\n",
+ "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nemo_text_processing]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pynini\n",
+ "import nemo_text_processing\n",
+ "from pynini.lib import pynutil"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, NEMO_DIGIT, delete_space, NEMO_SIGMA, NEMO_NOT_QUOTE, delete_extra_space, NEMO_NON_BREAKING_SPACE\n",
+ "from nemo_text_processing.text_normalization.normalize import Normalizer\n",
+ "\n",
+ "from nemo_text_processing.inverse_text_normalization.fr.taggers.cardinal import CardinalFst\n",
+ "from nemo_text_processing.inverse_text_normalization.fr.taggers.decimal import DecimalFst\n",
+ "from nemo_text_processing.inverse_text_normalization.fr.taggers.money import MoneyFst\n",
+ "from nemo_text_processing.inverse_text_normalization.fr.taggers.ordinal import OrdinalFst\n",
+ "from nemo_text_processing.inverse_text_normalization.fr.taggers.punctuation import PunctuationFst\n",
+ "from nemo_text_processing.inverse_text_normalization.fr.taggers.time import TimeFst\n",
+ "from nemo_text_processing.inverse_text_normalization.fr.taggers.whitelist import WhiteListFst\n",
+ "from nemo_text_processing.inverse_text_normalization.fr.taggers.word import WordFst\n",
+ "from nemo_text_processing.inverse_text_normalization.fr.verbalizers.cardinal import CardinalFst\n",
+ "from nemo_text_processing.inverse_text_normalization.fr.verbalizers.decimal import DecimalFst\n",
+ "from nemo_text_processing.inverse_text_normalization.fr.verbalizers.money import MoneyFst\n",
+ "from nemo_text_processing.inverse_text_normalization.fr.verbalizers.ordinal import OrdinalFst\n",
+ "from nemo_text_processing.inverse_text_normalization.fr.verbalizers.time import TimeFst\n",
+ "from nemo_text_processing.inverse_text_normalization.fr.verbalizers.whitelist import WhiteListFst\n",
+ "from nemo_text_processing.inverse_text_normalization.fr.verbalizers.word import WordFst\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "T0JxcvuPHvn9"
+ },
+ "source": [
+ "NeMo's Text Processing module uses Weighted Finite State Transducers (WFST) to deploy grammars for both efficient text normalization (TN) and inverse text normalization (ITN). In this tutorial, you will learn to build a normalization grammar from the ground up to use in your own text processing tasks. 
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Table of Contents\n", + "- WFSTs\n", + "- NeMo Inverse Text Processing\n", + "- Getting Started\n", + "- Cardinal WFST\n", + "- Ordinal WFST\n", + "- Decimal WFST\n", + "- Money WFST\n", + "- Time WFST\n", + "- WhiteList WFST\n", + "- Word and Punctuation WFST\n", + "- Other Classes\n", + "- Tokenize and Classify\n", + "- Verbalize and Verbalize Final\n", + "- Deployment" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "lMUovcMsfXyI" + }, + "source": [ + "# WFSTs " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Y1ejNMLbH1jM" + }, + "source": [ + "WFSTs are a form of [Finite State Machines](https://en.wikipedia.org/wiki/Finite-state_machine) used to graph relations between regular languages (or [regular expressions](https://en.wikipedia.org/wiki/Regular_expression)). For our purposes, they can be defined by two major properties:\n", + "\n", + "1. Mappings between accepted input and output expressions for text substitution\n", + "2. Path weighting to direct graph traversal" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nNg45ZuaP_A8" + }, + "source": [ + "For example, consider a simple normalization task of mapping the word \"cent\" (French for \"one hundred\") to the numerical representation `100`. We would begin with a Finite State representation of the regex `/cent/`:" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "uxo7gUkW_XKT" + }, + "source": [ + "![cent.png](images/cent.PNG)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "fahsjMVFlbCa" + }, + "source": [ + "And then create a mapping to the text string `100`:" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IMJ-fNSk_jXC" + }, + "source": [ + "![cent_to_100.png](images/cent_to_100.PNG)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "bPKW0I4yAGUb" + }, + "source": [ + "*Note: Null characters are expressed as `ε` by convention*" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "_0NK3aW5nG9C" + }, + "source": [ + "This would give us a WFST with universal path weights. (By default, `pynini` uses [tropical semirings](https://en.wikipedia.org/wiki/Tropical_semiring) for arcs, giving each arc a default weight of `0`.)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "CzBc9D3qTGJ-" + }, + "source": [ + "Now, let us consider expanding our model. To indicate values between `100` and `200`, French uses the number scheme of `cent + digit`. For example, `120` would be pronounced as \"cent-vingt\". To create the appropriate output string, we would now want to map \"cent\" to `1` and the remaining aspect of our string to the appropriate digit representation." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "GRrKNQRjFDoL" + }, + "source": [ + "![cent_vingt_to_120.png](images/cent_vingt_to_120.PNG)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "jLpm4mufAfUz" + }, + "source": [ + "However this would make our graph [non-deterministic](https://en.wikipedia.org/wiki/Nondeterministic_algorithm) - it will have multiple possibilities for termination. Now an input of \"cent-vingt\" could have the outcome of `100` or `10020` when only one is correct. 
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "![cent_vingt_bad.png](images/cent_vingt_bad.PNG)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "c-GJTpgIAf7S" + }, + "source": [ + "To correct this, we may add a new end state and a weight to the path that accepts the input without `s`:" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6GJcsdttGg_S" + }, + "source": [ + "![cent_vingt_good.png](images/cent_vingt_good.PNG)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "mHft1gzsAipc" + }, + "source": [ + "Now, we can guarantee an ideal mapping by relying on a shortest-path (smallest-weight) heuristic: traversal of the graph will prioritize longer inputs, only converting \"cent\" to `100` when a larger input isn't available. As such, we've now removed the undesired output `10020` while preserving our desired coverage in string mapping. \n", + "\n", + "This use of weights to ensure predictable behavior allows WFSTs to exploit the efficiency of standard graph traversal algorithms while also maintaining versatility. " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8Ik4PBXafSSB" + }, + "source": [ + "# NeMo Inverse Text Processing " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "b2fcWKhqYVF5" + }, + "source": [ + "Following [Google's Kestrel](https://www.researchgate.net/publication/277932107_The_Kestrel_TTS_text_normalization_system) framework, NeMo deploys two composite WFSTs for text normalization. They are as follows:\n", + "1. A *classifier* (or tagger) to label potential tokens by 'semiotic class' (e.g. currency, ordinal number, street address)\n", + "2. A *verbalizer* to render a tagged token in conventional written form\n", + "\n", + "For example, consider the sentence: <>\n", + "\n", + "For an ITN task, a tokenizer would identify the following tokens:\n", + "\n", + "`[\"le\" ,\"premier\", \"juillet\", \"il\", \"a\", \"mangé\", \"trente-cinq\", \"pommes\"]`\n", + "\n", + "and provide each a class token: \n", + "\n", + "- `tokens { name: \"le\" }`\n", + "- `tokens { date { day: \"1\" month: \"juillet\" } } ` \n", + "- `tokens { name: \"il\" }` \n", + "- `tokens { name: \"a\" }` \n", + "- `tokens { name: \"mangé\" }`\n", + "- `tokens { cardinal { integer: \"35\" } }` \n", + "- `tokens { name: \"pommes\" }`\n", + "\n", + "These tokens are then passed to a 'verbalizer' WFST, which renders each token in a conventional written form:\n", + "\n", + "- `tokens { name: \"le\" }` -> `le` \n", + "- `tokens { date { day: \"1\" month: \"juillet\" } } ` -> `1ᵉʳ` \n", + "- `tokens { name: \"il\" }` -> `juillet`\n", + "- `tokens { name: \"il\" }` -> `il` \n", + "- `tokens { name: \"a\" }` -> `a`\n", + "- `tokens { name: \"mangé\" }` -> `mangé` \n", + "- `tokens { cardinal { integer: \"35\" } }` -> `35` \n", + "- `tokens { name: \"pommes\" }` -> `pommes`\n", + "\n", + "and merged into a normalized string:\n", + "\n", + "`le 1ᵉʳ juillet il a mangé 35 pommes`\n", + "\n", + "With the equivalent TN task being the reverse process. " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "_n-5JExAbvwr" + }, + "source": [ + ">**_Note:_**\n", + ">A few things to note specific to inverse text normalization: \n", + ">- Each class token has a unique set of field names that must be parsed by the classifier. The default field names for NeMo are chosen to mirror the syntax in [Sparrowhawk](https://github.com/google/sparrowhawk) to enable deployment. 
If these fields are not exact, you will not be able to use Sparrowhawk.\n",
+ ">- NeMo assumes no punctuation (unless explicitly provided in the grammar) and all lower casing to ease integration with upstream ASR.\n",
+ ">- The `name` class token is the default for any token that does not require processing. It will be left 'as is.'\n",
+ ">- You may note how the tokenizer performed the conversion of `premier` to `1` while the verbalizer normalized `1` -> `1ᵉʳ`. Such decisions are implementation-dependent and will vary depending on preference and language. (That is, normalization from `premier` -> `1ᵉʳ` could have been a tokenization step.)\n",
+ ">- By default, NeMo will create several permutations of key values in a token to ease normalization. That is, given the token `tokens { date { day: \"1\" month: \"juillet\" } }`, it will also produce paths for `tokens { date { month: \"juillet\" day: \"1\" } }`. To prevent this and avoid ambiguity in verbalizer input, tokens can be assigned a `preserve_order` attribute to prevent permutation. (e.g. `tokens { date { day: \"1\" month: \"juillet\" preserve_order: true } }`) (We will discuss this [later in the tutorial](#verbalizer).)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## WFST Classes"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "NeMo Text Processing's base languages support a range of semiotic classes to permit integration with Sparrowhawk.\n",
+ "For this tutorial, we will be focusing on the following classes:\n",
+ "- CARDINAL\n",
+ "- ORDINAL\n",
+ "- DECIMAL\n",
+ "- MONEY\n",
+ "- TIME\n",
+ "- WHITELIST\n",
+ "- WORD\n",
+ "- PUNCTUATION\n",
+ "\n",
+ "While not comprehensive, these classes will provide enough foundation and exposure to edge cases that you will feel comfortable constructing the remaining classes on your own.\n",
+ "\n",
+ "**NOTE**: *If you intend to only develop for personal use with NeMo, you may rename these classes as desired. However, Sparrowhawk integration\n",
+ "REQUIRES use of only these tags and their assigned attributes. For a list of Sparrowhawk tokens and attributes, [consult the Sparrowhawk repository](https://github.com/yzhang123/sparrowhawk/blob/test/src/proto/semiotic_classes.proto)*"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Further Reading"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "If you want to learn more about NeMo Text Processing, you may wish to consult the following:\n",
+ "- [Y. Zhang, E. Bakhturina, K. Gorman, and B. Ginsburg, \"NeMo Inverse Text Normalization: From Development To Production\"](https://arxiv.org/pdf/2104.05055.pdf)\n",
+ "- [NeMo's Text Normalization Documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/nlp/text_normalization/wfst/intro.html) \n",
+ "- [NeMo's Text Normalization Deployment Documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/nlp/text_normalization/wfst/wfst_text_processing_deployment.html)\n",
+ "- NeMo's [Text Normalization Introduction Tutorial](https://colab.research.google.com/github/NVIDIA/NeMo/blob/stable/tutorials/text_processing/Text_Normalization.ipynb)\n",
+ "- [Sparrowhawk Documentation](https://github.com/google/sparrowhawk)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For further information regarding WFSTs, please see:\n",
+ "- [D. Jurafsky and J. Martin, *Speech and Language Processing*, Ch. 2](https://web.stanford.edu/~jurafsky/slp3/2.pdf)\n",
+ "- [K. 
Gorman and R. Sproat, *Finite-State Text Processing*](http://www.morganclaypoolpublishers.com/catalog_Orig/product_info.php?products_id=1636)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "XFdXRcnUfI25"
+ },
+ "source": [
+ "# Getting Started \n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "K3Zl3VwqdYqL"
+ },
+ "source": [
+ "To begin tokenizer development, make sure you have [installed NeMo from source](https://github.com/NVIDIA/NeMo)."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "rGg7Bf13FXgc"
+ },
+ "source": [
+ "For this tutorial, we will focus on developing an Inverse Text Normalization system, such as one you may encounter in downstream ASR processing. As such, we will navigate to\n",
+ "`nemo_text_processing/inverse_text_normalization` and create a directory for our target language (French) and subdirectories\n",
+ "for `taggers` and `verbalizers`. You may also wish to create a `data` subdirectory to ease navigation.\n",
+ "\n",
+ "(Note, for text normalization, the suggested directory structure would be the same within the `nemo_text_processing/text_normalization` folder. In fact, many of NeMo's grammars actively share code between the two.)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "T58E4pU4FN3A"
+ },
+ "source": [
+ "```bash\n",
+ "git clone https://github.com/NVIDIA/NeMo\n",
+ "cd NeMo && ./reinstall.sh\n",
+ "cd nemo_text_processing/inverse_text_normalization/\n",
+ "export LANGUAGE=fr # Change this to your desired language\n",
+ "mkdir $LANGUAGE\n",
+ "mkdir $LANGUAGE/taggers\n",
+ "mkdir $LANGUAGE/verbalizers\n",
+ "mkdir $LANGUAGE/data\n",
+ "```"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "O1vfz-bUFpwz"
+ },
+ "source": [
+ "All WFSTs deployed in NeMo inherit from the `GraphFst` class.\n",
+ "While in most cases you can simply import from a pre-existing `graph_utils.py`, you may occasionally find it helpful for deployment to keep a copy \n",
+ "in your working directory for language-specific edits. (For our purposes, we will be utilizing `nemo_text_processing.text_normalization.en.graph_utils`, which serves as the default for NeMo's grammars.)\n",
+ "\n",
+ "You may also wish to keep a copy of `utils.py` (found in each language system's directory)\n",
+ "in your working directory to assist with pathing. (Make sure to adjust the imports for your language.)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "3OME84EmOQ4h",
+ "outputId": "6eea17f9-aae9-4176-ae35-3d1f0e94b4ea"
+ },
+ "source": [
+ "```bash\n",
+ "cp ../text_normalization/en/graph_utils.py $LANGUAGE/\n",
+ "cp ../text_normalization/en/utils.py $LANGUAGE/\n",
+ "```"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Dependencies"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For development, we utilize `nemo_text_processing` and `pynini` (a Python library for efficient WFST construction and traversal, installed with `nemo` by default). "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "While this tutorial will attempt to make the use of `pynini` tools transparent, it does assume some familiarity with its syntax. For a more in-depth guide, the following provide an overview of its functions:\n",
+ "\n",
+ "- [K. 
Gorman, Pynini: A Python library for weighted finite-state grammar compilation](https://aclanthology.org/W16-2409.pdf)\n", + "- [Pynini Documentation](https://www.openfst.org/twiki/bin/view/GRM/PyniniDocs) " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will also import the `pynutil` module for access to some extra functionality, along with writing a simple helper function for printing `pynini` graphs through the previously discussed 'shortest-path' heuristic." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "sz18Ui8-8Kf4" + }, + "outputs": [], + "source": [ + "from pynini.lib import pynutil\n", + "\n", + "def apply_fst(text, fst):\n", + " \"\"\" Given a string input, returns the output string\n", + " produced by traversing the path with lowest weight.\n", + " If no valid path accepts input string, returns an\n", + " error.\n", + " \"\"\"\n", + " try:\n", + " print(pynini.shortestpath(text @ fst).string())\n", + " except pynini.FstOpError:\n", + " print(f\"Error: No valid output with given input: '{text}'\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Cardinal WFST " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "rOyLZb9DgLoh" + }, + "source": [ + "The vast majority of ITN tasks require the ability to recognize and denormalize numbers. As such, we will begin with developing a Classifier and Verbalizer for Cardinal (integer) numbers. (e.g. `-3,-2,-1,0,1,2,3,4,5....99,100,101...`)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "9GZQkH1V89kh" + }, + "source": [ + "## Grammar" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will begin by first constructing a Cardinal WFST, using French as an example language. While your target language will obviously differ greatly from our example, you will likely find some several similarities, such as:\n", + "- Use of a (semi) regular decimal (base-10) counting system. (A common - but not universal - feature of natural languages.)\n", + "- Incorporation of several irregularities requiring contingencies in our WFST construction. (e.g. a pseudo vigesimal (base-20) series.)\n", + "- Use of gender and number agreement in enumeration." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Digits" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "NzJ2DIwc_TT3" + }, + "source": [ + "We shall begin with the first decimal place. 
As these numbers serve as the building blocks for the rest of our WFST, we shall begin by explicitly writing out their WFST mappings with `pynini.string_map`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "u0H4qg4BjYfB"
+ },
+ "outputs": [],
+ "source": [
+ "zero = pynini.string_map([(\"zéro\",\"0\")]) # French only pronounces zero as a stand-alone word\n",
+ "digits = pynini.string_map([ # pynini function that creates explicit input-output mappings for a WFST\n",
+ "\t\t\t\t(\"un\",\"1\"),\n",
+ "\t\t\t\t(\"une\",\"1\"),\n",
+ "\t\t\t\t(\"deux\",\"2\"),\n",
+ "\t\t\t\t(\"trois\",\"3\"),\n",
+ "\t\t\t\t(\"quatre\",\"4\"),\n",
+ "\t\t\t\t(\"cinq\",\"5\"),\n",
+ "\t\t\t\t(\"six\",\"6\"),\n",
+ "\t\t\t\t(\"sept\",\"7\"),\n",
+ "\t\t\t\t(\"huit\",\"8\"),\n",
+ "\t\t\t\t(\"neuf\",\"9\")\n",
+ "])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "0nHjY-NNjdWQ"
+ },
+ "source": [
+ "We may also simply write a `tsv` file in a separate data folder:"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "- zéro\t0\n",
+ "- un\t1\n",
+ "- une\t1\n",
+ "- deux\t2\n",
+ "- trois\t3\n",
+ "- quatre\t4\n",
+ "- cinq\t5\n",
+ "- six\t6\n",
+ "- sept\t7\n",
+ "- huit\t8\n",
+ "- neuf\t9"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "xicKcZLEzQTg"
+ },
+ "source": [
+ "and import it with `string_file`:"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "`digits = pynini.string_file(\"data/digits.tsv\")`\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "If `utils.py` is in your working directory, you may also use `get_abs_path`, which always resolves paths relative to your {LANGUAGE} directory:\n",
+ "\n",
+ "`from nemo_text_processing.inverse_text_normalization.{LANGUAGE}.utils import get_abs_path`\n",
+ "\n",
+ "`digits = pynini.string_file(get_abs_path(\"data/digits.tsv\"))`"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "yPccmicQkYAB"
+ },
+ "source": [
+ "While we will use `string_map` throughout this tutorial, please note that NeMo employs the latter option for maintainability and recommends its use instead."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Teens"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "FQJiJcVMrNmC"
+ },
+ "source": [
+ "Let us consider our next set of numbers:\n",
+ "- 10 - dix\n",
+ "- 11 - onze\n",
+ "- 12 - douze\n",
+ "- 13 - treize\n",
+ "- 14 - quatorze\n",
+ "- 15 - quinze\n",
+ "- 16 - seize\n",
+ "- 17 - dix-sept\n",
+ "- 18 - dix-huit\n",
+ "- 19 - dix-neuf\n",
+ "\n",
+ "Like before, we can simply use `string_map` to compose a WFST for them. But note how there is some redundancy in the number set: `17`, `18`, and `19` are all of the form `dix + digit`. It would be more efficient to reuse our prior WFST in these cases than to create new arcs, states, and weights. \n",
+ "\n",
+ "We can achieve this using pynini's string concatenation function to extend the accepted input strings. We will start with a WFST for `11-16`."
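+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Before that, it may help to quickly sanity-check the `digits` and `zero` graphs with the `apply_fst` helper defined earlier. (This is a minimal check of our own; the expected outputs in the comments assume the mappings above.)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "apply_fst(\"sept\", digits)  # expected: 7\n",
+ "apply_fst(\"zéro\", zero)  # expected: 0\n",
+ "apply_fst(\"dix\", digits)  # expected: an error, since \"dix\" is not in the digits map"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "With the single digits behaving as expected, here is the WFST for `11-16`:"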
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "orSgBwyXsfY5"
+ },
+ "outputs": [],
+ "source": [
+ "teens = pynini.string_map([\n",
+ "\t\t\t\t(\"onze\",\"11\"),\n",
+ "\t\t\t\t(\"douze\",\"12\"),\n",
+ "\t\t\t\t(\"treize\",\"13\"),\n",
+ "\t\t\t\t(\"quatorze\",\"14\"),\n",
+ "\t\t\t\t(\"quinze\",\"15\"),\n",
+ "\t\t\t\t(\"seize\",\"16\"),\n",
+ "])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "s1yIgigdtriQ"
+ },
+ "source": [
+ "Now, we will create a `tens` WFST that is responsible for mapping all instances of \"dix\", and concatenate it (accomplished with the overloaded `+` operator) with the prior `digits` WFST, deleting any possible hyphens in between with a small `delete_hyphen` helper."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "CzwZrFCkt87W"
+ },
+ "outputs": [],
+ "source": [
+ "tens = pynini.string_map([(\"dix\", \"1\")])\n",
+ "delete_hyphen = pynini.closure(pynutil.delete(\"-\"), 0, 1) # Applies the deletion 0-1 times. Equivalent to regex /?/\n",
+ "\n",
+ "graph_tens = tens + delete_hyphen + digits"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "2knCwybmuTDn"
+ },
+ "source": [
+ "We can now combine the `teens` and `graph_tens` WFSTs through the union operation (done with the overloaded `|` operator), allowing our choice of either graph."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "WIRJ4PE7uRrl"
+ },
+ "outputs": [],
+ "source": [
+ "graph_tens_and_teens = graph_tens | teens"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "TGkzKoeuxbeA"
+ },
+ "source": [
+ "Let's see if it works, using our `apply_fst` helper:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "v2iD0_HnxdUV",
+ "outputId": "1d8f434f-ff8a-4c85-b8d0-1127e4587ddf"
+ },
+ "outputs": [],
+ "source": [
+ "apply_fst(\"dix-huit\", graph_tens_and_teens)\n",
+ "apply_fst(\"seize\", graph_tens_and_teens)\n",
+ "apply_fst(\"dix\", graph_tens_and_teens)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "Yh2f-3rux8_2"
+ },
+ "source": [
+ "The first two worked, but why did we get an error with \"dix\"? If you look back, you'll notice that while our graph has a mapping from \"dix\" to `1`, the concatenation with `digits` assumes that some input from those strings will follow. That is, we left no opportunity for an *omission* of `digits`.\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "OM_eJYlV1UVp"
+ },
+ "source": [
+ "![dix_to_digits.png](images/dix_to_digits.PNG)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "M4xCMKRA1Wzw"
+ },
+ "source": [
+ "You may also note that this issue would hold if we wanted to normalize only digits - our graph would error out since it expects a `tens` or `teens` input first. \n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "XJHnlJCm1dPv"
+ },
+ "source": [
+ "We can fix both of these problems by allowing an option to simply insert a zero without any extra input. 
(Much like our \"cent\" example.)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "9_vvJ9Bl1dYQ"
+ },
+ "source": [
+ "![dix_to_digits_with_insert.png](images/dix_to_digits_with_insert.PNG)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "hJq3uoMN2OcC"
+ },
+ "source": [
+ "This may be accomplished through use of the `pynutil.insert` function:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "7h9xuNfA081P"
+ },
+ "outputs": [],
+ "source": [
+ "graph_digits = digits | pynutil.insert(\"0\") # inserts \"0\" if no digit follows"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "fA_L_6Ky2SHm"
+ },
+ "source": [
+ "And for `graph_tens`:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "jelVA81o2RXu"
+ },
+ "outputs": [],
+ "source": [
+ "tens = tens | pynutil.insert(\"0\") | tens + delete_hyphen\n",
+ "graph_tens = tens + graph_digits"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "Gb5uhpGr3I4X"
+ },
+ "source": [
+ "Bringing everything together:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "bLkDddkA3Stu"
+ },
+ "outputs": [],
+ "source": [
+ "graph_teens_and_tens = graph_tens | teens\n",
+ "graph_all = graph_teens_and_tens | zero "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "DESDKScv3r3P"
+ },
+ "source": [
+ "Let us now check our tests:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "7wrDNXuD3oh9",
+ "outputId": "661d2526-5aa0-4640-9285-bca15cd56c75"
+ },
+ "outputs": [],
+ "source": [
+ "apply_fst(\"dix-huit\", graph_all) \n",
+ "apply_fst(\"seize\" , graph_all)\n",
+ "apply_fst(\"dix\" , graph_all) \n",
+ "apply_fst(\"une\" , graph_all) \n",
+ "apply_fst(\"trois\" , graph_all) \n",
+ "apply_fst(\"quatre\" , graph_all) \n",
+ "apply_fst(\"zéro\" , graph_all)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "Tz_k3NoB66Bv"
+ },
+ "source": [
+ "Now we have no more errors - albeit at the cost of leading zeroes. (We will take care of this later in the section.)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Tens"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "2dJZAhE57an3"
+ },
+ "source": [
+ "Now that we've taken care of the teens, we can proceed with the rest of the tens. Like many languages, French employs a (fairly) regular schema of: `tens_digit + ones_digit` for 20-100. Indeed, we can summarize 20-69 in the following template:\n",
+ "\n",
+ "- 20 - vingt\n",
+ "- 21 - vingt-et-un\n",
+ "- 22 - vingt-deux\n",
+ "- 23 - vingt-trois\n",
+ "- 24 - vingt-quatre\n",
+ "- 25 - vingt-cinq\n",
+ "- 26 - vingt-six\n",
+ "- 27 - vingt-sept\n",
+ "- 28 - vingt-huit\n",
+ "- 29 - vingt-neuf\n",
+ "- 30 - trente\n",
+ "- 31 - trente-et-un\n",
+ "- 32 - trente-deux\n",
+ "- 33 - trente-trois\n",
+ "...\n",
+ "- 40 - quarante\n",
+ "...\n",
+ "- 50 - cinquante\n",
+ "...\n",
+ "- 60 - soixante\n",
+ "..."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "BuaxVG35UKcs"
+ },
+ "source": [
+ "Expanding `tens` to accommodate this template is fairly easy: we simply extend our earlier `string_map` with the new terms in the 'tens place.' 
From there, we once again concatenate the `digits` WFST (along with a simple WFST to delete the \"-et-\" term that occasionally occurs)."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "qAnXlRkR32wt"
+ },
+ "outputs": [],
+ "source": [
+ "tens = pynini.string_map([\n",
+ "\t\t\t\t(\"dix\", \"1\"),\n",
+ "\t\t\t\t(\"vingt\",\"2\"),\n",
+ "\t\t\t\t(\"trente\",\"3\"),\n",
+ "\t\t\t\t(\"quarante\",\"4\"),\n",
+ "\t\t\t\t(\"cinquante\",\"5\"),\n",
+ "\t\t\t\t(\"soixante\",\"6\"),\n",
+ "\t\t])\n",
+ "\n",
+ "graph_et = pynutil.delete(\"-et-\")\n",
+ "\n",
+ "tens = tens | pynutil.insert(\"0\") | tens + pynutil.delete(\"-\") | tens + graph_et\n",
+ "\n",
+ "graph_tens = tens + graph_digits\n",
+ "graph_teens_and_tens = graph_tens | teens\n",
+ "graph_all = graph_teens_and_tens | zero "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "-hJwqPDx8I2R"
+ },
+ "source": [
+ "#### Special Cases: 70-99"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "zvBLvJdY9XPA"
+ },
+ "source": [
+ "However, things get tricky once we go beyond the 60s. Here, standard French possesses a notorious pseudo-vigesimal (base-20) system. For numbers 70-99:\n",
+ "\n",
+ "- 70 - soixante-dix <- Literally in English: \"sixty-ten\"\n",
+ "- 71 - soixante-et-onze <- Literally in English: \"sixty-and-eleven\"\n",
+ "- 72 - soixante-douze\n",
+ "- 73 - soixante-treize\n",
+ "- 74 - soixante-quatorze\n",
+ "- 75 - soixante-quinze\n",
+ "- 76 - soixante-seize\n",
+ "- 77 - soixante-dix-sept\n",
+ "- 78 - soixante-dix-huit\n",
+ "- 79 - soixante-dix-neuf\n",
+ "- 80 - quatre-vingts <- Literally in English: \"four-twenties\"\n",
+ "- 81 - quatre-vingt-un\n",
+ "- 82 - quatre-vingt-deux\n",
+ "- 83 - quatre-vingt-trois\n",
+ "- 84 - quatre-vingt-quatre\n",
+ "- 85 - quatre-vingt-cinq\n",
+ "- 86 - quatre-vingt-six\n",
+ "- 87 - quatre-vingt-sept\n",
+ "- 88 - quatre-vingt-huit\n",
+ "- 89 - quatre-vingt-neuf\n",
+ "- 90 - quatre-vingt-dix <- Literally in English: \"four-twenties-ten\"\n",
+ "- 91 - quatre-vingt-onze\n",
+ "- 92 - quatre-vingt-douze\n",
+ "- 93 - quatre-vingt-treize\n",
+ "- 94 - quatre-vingt-quatorze\n",
+ "- 95 - quatre-vingt-quinze\n",
+ "- 96 - quatre-vingt-seize\n",
+ "- 97 - quatre-vingt-dix-sept\n",
+ "- 98 - quatre-vingt-dix-huit\n",
+ "- 99 - quatre-vingt-dix-neuf"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "HQNiwFDyVV_3"
+ },
+ "source": [
+ "As before, we want to take advantage of as much redundancy as we can without creating additional ambiguities that will impede graph traversal. \n",
+ "\n",
+ "We first note that - despite repeating prior words - \"quatre-vingt\" can be mapped to `8` without introducing ambiguity. This is because, despite \"quatre\" and \"vingt\" being present in our prior graphs, our WFST has no pathing for them in this exact order. As such, we can simply add it to `tens` and immediately improve our coverage for 81-89. 
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "AvJqaHhE9Wbd" + }, + "outputs": [], + "source": [ + "tens = pynini.string_map([\n", + "\t\t\t\t(\"dix\", \"1\"),\n", + "\t\t\t\t(\"vingt\",\"2\"),\n", + "\t\t\t\t(\"trente\",\"3\"),\n", + "\t\t\t\t(\"quarante\",\"4\"),\n", + "\t\t\t\t(\"cinquante\",\"5\"),\n", + "\t\t\t\t(\"soixante\",\"6\"),\n", + " (\"quatre-vingt\", \"8\")\n", + "\t\t])\n", + "tens = tens | pynutil.insert(\"0\") | tens + delete_hyphen | tens + graph_et\n", + "graph_tens = tens + graph_digits\n", + "graph_teens_and_tens = graph_tens | teens\n", + "graph_all = graph_teens_and_tens | zero " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0_DtcpZxZTzX" + }, + "source": [ + "Of course, now we permit the occurrence of:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "V2leANlDhCvj", + "outputId": "db8d5d02-c848-4e50-df23-d8499538281c" + }, + "outputs": [], + "source": [ + "apply_fst(\"quatre-vingt\", graph_all)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "_X_ef3sihCHH" + }, + "source": [ + "which is invalid (French uses the plural \"quatre-vingt**s**\" here.) " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "vgKT903Y6rIQ" + }, + "source": [ + "Should we alter the grammar because of this? Such a decision will largely be dependent on your intended implementation and design aims. If you see the question of 'legal' tokens as a responsibility of your upstream model, then there is no need for any alteration: \"quatre-vingt\" as a standalone token will simply not occur, so there is no input to be concerned with.\n", + "\n", + "However, if your ITN grammars are developed for an environment with low-fidelity ASR and/or where mistaken transcriptions incur heavy loss (e.g. ASR for driving directions, telephone-numbers, banking) then you may wish to err on the side of caution." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Hf_FghLT7jdY" + }, + "source": [ + "If we wanted to go for the latter, we would want to mark that \"quatre-vingts\" maps **only** to `80`. 
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "JliFTF3mZSsJ" + }, + "outputs": [], + "source": [ + "quatre_vingt_plural = pynini.string_map([\n", + " (\"quatre-vingts\", \"80\")\n", + "\t\t])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "81_b3XPbicT1" + }, + "source": [ + "And that \"quatre vingt\" can only accompany non-zero digits:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "E4_dmg6uin2j" + }, + "outputs": [], + "source": [ + "quatre_vingt_singular = pynini.string_map([\n", + " (\"quatre-vingt-\", \"8\") # Note that the hyphen can be assumed now\n", + "\t\t])\n", + "graph_digits_without_zero = pynini.string_map([\n", + "\t\t\t\t(\"un\",\"1\"),\n", + "\t\t\t\t(\"une\",\"1\"),\n", + "\t\t\t\t(\"deux\",\"2\"),\n", + "\t\t\t\t(\"trois\",\"3\"),\n", + "\t\t\t\t(\"quatre\",\"4\"),\n", + "\t\t\t\t(\"cinq\",\"5\"),\n", + "\t\t\t\t(\"six\",\"6\"),\n", + "\t\t\t\t(\"sept\",\"7\"),\n", + "\t\t\t\t(\"huit\",\"8\"),\n", + "\t\t\t\t(\"neuf\",\"9\")\n", + "])\n", + "graph_eighties = (quatre_vingt_singular + graph_digits_without_zero) | quatre_vingt_plural" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "mL7jpekV8VgP" + }, + "source": [ + "For the `70`'s and `90`'s, we would likewise need to form exclusive configurations for their number series, rewriting digits to recognize \"onze\", \"douze\", \"treize\"... as `1,2,3....` (Note, we'll have to separate `71` and `91` to manage \"soixante-**et**-onze\" vs. \"quatre-vingt-onze\".)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "y3dYkwK29zCX" + }, + "outputs": [], + "source": [ + "seventy_and_ninety = pynini.string_map([\n", + " (\"soixante-dix\", \"70\"),\n", + " (\"quatre-vingt-dix\", \"90\"),\n", + "\t\t])\n", + "\n", + "seventy_and_ninety_tens = pynini.string_map([\n", + " (\"soixante-\", \"7\"),\n", + " (\"quatre-vingt-\", \"9\"),\n", + "\t\t])\n", + "\n", + "seventy_and_ninety_one = pynini.string_map([\n", + " (\"soixante-et-onze\", \"71\"),\n", + " (\"quatre-vingt-onze\", \"91\"),\n", + "\t\t])\n", + "\n", + "seventy_and_ninety_digits = digits = pynini.string_map([ \n", + "\t\t\t\t(\"douze\",\"2\"),\n", + "\t\t\t\t(\"treize\",\"3\"),\n", + "\t\t\t\t(\"quatorze\",\"4\"),\n", + "\t\t\t\t(\"quinze\",\"5\"),\n", + "\t\t\t\t(\"seize\",\"6\"),\n", + "\t\t\t\t(\"dix-sept\",\"7\"), # For 97-99, digits are used as normal.\n", + "\t\t\t\t(\"dix-huit\",\"8\"),\n", + "\t\t\t\t(\"dix-neuf\",\"9\")\n", + "])\n", + "\n", + "graph_seventies_and_nineties = (seventy_and_ninety_tens + seventy_and_ninety_digits) | seventy_and_ninety | seventy_and_ninety_one " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4NCrCwEH9HVg" + }, + "source": [ + "Now we union them with our original `tens` series:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "psGCgxaH-btn" + }, + "outputs": [], + "source": [ + "tens = pynini.string_map([\n", + "\t\t\t\t(\"dix\", \"1\"),\n", + "\t\t\t\t(\"vingt\",\"2\"),\n", + "\t\t\t\t(\"trente\",\"3\"),\n", + "\t\t\t\t(\"quarante\",\"4\"),\n", + "\t\t\t\t(\"cinquante\",\"5\"),\n", + "\t\t\t\t(\"soixante\",\"6\"),\n", + "\t\t])\n", + "tens = tens | pynutil.insert(\"0\") | tens + delete_hyphen | tens + graph_et\n", + "\n", + "graph_tens = tens + graph_digits\n", + "graph_tens_with_special_cases = graph_tens | graph_seventies_and_nineties | graph_eighties\n", + "graph_teens_and_tens = graph_tens_with_special_cases | teens\n", + "graph_all 
= graph_teens_and_tens | zero " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xWjSAGRX_s0H" + }, + "source": [ + "Making sure test cases work:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "kapWmgos-xcn", + "outputId": "5e9c6f5c-1450-495f-cadf-2945355b651c" + }, + "outputs": [], + "source": [ + "apply_fst(\"quatre-vingt-treize\" , graph_all)\n", + "apply_fst(\"quatre-vingts\", graph_all)\n", + "apply_fst(\"quatre-vingt-deux\", graph_all)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "hNUepfKZ_vS_" + }, + "source": [ + "And the other cases fail as expected:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "wo2pCOXGAgYn", + "outputId": "0bbe2792-8bc9-40f7-dd28-4745bd1390e3" + }, + "outputs": [], + "source": [ + "apply_fst(\"quatre-vingt\", graph_all)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4VPuCTTtigh-" + }, + "source": [ + "Of course, there are other ways we could have reconfigured the grammar: we could simply make specific graphs for multiples of ten (`10,20,30..`) and all cases where \"-et-\" occurs (`21,31,41,51...91`). \n", + "\n", + "But this ignores a more important question: was any of this necessary in the first place? All these extra grammars did was simply expand coverage for thirty additional cardinals. And they still didn't exclude all faulty inputs! Note the following cases:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "KICvpeewCFyH", + "outputId": "174dd910-7329-4a5f-a5b0-5e796a174217" + }, + "outputs": [], + "source": [ + "apply_fst(\"dix-une\", graph_all) # supposed to be \"onze\"\n", + "apply_fst(\"dix-deux\", graph_all) # supposed to be \"douze\"\n", + "apply_fst(\"vingt-un\", graph_all) # supposed to be \"vingt-et-un\"\n", + "apply_fst(\"trente-un\", graph_all) # supposed to be \"trente-et-un\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0D130jIVCLp2" + }, + "source": [ + "We *still* need to address possible edge cases!\n", + "\n", + "All of this is to say that knowing your input domain before construction is imperative, as small decisions can easily determine your output range later down the line.\n", + "\n", + "Indeed, if you're particularly concerned with limiting input possibilities, it may be valid simply to write all unique options within a `string_map`. While a tad inelegant, it certainly assists in controlling your outputs." 
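+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "(As an aside: if typing every pair by hand seems tedious, the exhaustive list can be generated with ordinary Python before being compiled once by `pynini.string_map`. Below is a minimal sketch of our own for the eighties; the names `digit_words` and `graph_eighties_listed` are illustrative, and the \"quatre-vingt-une\" variant is omitted for brevity.)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Build (spoken, written) pairs for 80-89 programmatically, then compile them once.\n",
+ "digit_words = [\"un\", \"deux\", \"trois\", \"quatre\", \"cinq\", \"six\", \"sept\", \"huit\", \"neuf\"]\n",
+ "eighties_pairs = [(\"quatre-vingts\", \"80\")] + [\n",
+ "    (f\"quatre-vingt-{word}\", str(80 + value)) for value, word in enumerate(digit_words, start=1)\n",
+ "]\n",
+ "graph_eighties_listed = pynini.string_map(eighties_pairs)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Written out by hand for the full `70-99` range, the exhaustive map looks like this:"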
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "RSp9w5ayA9ii"
+ },
+ "outputs": [],
+ "source": [
+ "graph_tens_special = pynini.string_map([\n",
+ "\t\t\t\t(\"soixante-dix\", \"70\"),\n",
+ "\t\t\t\t(\"soixante-et-onze\",\"71\"),\n",
+ " (\"soixante-douze\",\"72\"),\n",
+ "\t\t\t\t(\"soixante-treize\",\"73\"),\n",
+ "\t\t\t\t(\"soixante-quatorze\",\"74\"),\n",
+ "\t\t\t\t(\"soixante-quinze\",\"75\"),\n",
+ "\t\t\t\t(\"soixante-seize\",\"76\"),\n",
+ " (\"soixante-dix-sept\",\"77\"),\n",
+ " (\"soixante-dix-huit\",\"78\"),\n",
+ "\t\t\t\t(\"soixante-dix-neuf\",\"79\"),\n",
+ " (\"quatre-vingts\", \"80\"),\n",
+ " (\"quatre-vingt-un\", \"81\"),\n",
+ " (\"quatre-vingt-une\", \"81\"),\n",
+ "\t\t\t\t(\"quatre-vingt-deux\",\"82\"),\n",
+ " (\"quatre-vingt-trois\",\"83\"),\n",
+ " (\"quatre-vingt-quatre\",\"84\"),\n",
+ " (\"quatre-vingt-cinq\",\"85\"),\n",
+ " (\"quatre-vingt-six\",\"86\"),\n",
+ " (\"quatre-vingt-sept\",\"87\"),\n",
+ " (\"quatre-vingt-huit\",\"88\"),\n",
+ " (\"quatre-vingt-neuf\",\"89\"),\n",
+ " (\"quatre-vingt-dix\",\"90\"),\n",
+ " (\"quatre-vingt-onze\",\"91\"),\n",
+ " (\"quatre-vingt-douze\",\"92\"),\n",
+ " (\"quatre-vingt-treize\",\"93\"),\n",
+ " (\"quatre-vingt-quatorze\",\"94\"),\n",
+ " (\"quatre-vingt-quinze\",\"95\"),\n",
+ " (\"quatre-vingt-seize\",\"96\"),\n",
+ " (\"quatre-vingt-dix-sept\",\"97\"),\n",
+ " (\"quatre-vingt-dix-huit\",\"98\"),\n",
+ " (\"quatre-vingt-dix-neuf\",\"99\"),])"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "NUPs1qOUg-hE"
+ },
+ "source": [
+ "Which is more efficient? Once again, it is dependent on your language and implementation. If we simply compare each graph's number of states:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "sQ9GsIkNzxsU",
+ "outputId": "d70ca927-9c43-4f49-846c-c181e725e011"
+ },
+ "outputs": [],
+ "source": [
+ "constructed_version = (graph_seventies_and_nineties | graph_eighties)\n",
+ "constructed_version.num_states()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "Xsgdu5TYx09_",
+ "outputId": "5812912f-883b-42e8-afbf-3ec4a0170345"
+ },
+ "outputs": [],
+ "source": [
+ "string_map_version = graph_tens_special\n",
+ "string_map_version.num_states()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "9jzn_U7s0Sit"
+ },
+ "source": [
+ "We see that their number of states (graph vertices) is almost equal. Yet, if we use `pynini.optimize` - a method that calls a suite of WFST minimization algorithms: "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "7YtqhOY90iF0",
+ "outputId": "26f0f51b-b00d-4f5a-9b2f-330c9812666a"
+ },
+ "outputs": [],
+ "source": [
+ "constructed_version.optimize()\n",
+ "constructed_version.num_states()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "y93SqnOf0qa8",
+ "outputId": "74efcbfa-a272-4fc6-e36e-f1e31c6df221"
+ },
+ "outputs": [],
+ "source": [
+ "string_map_version.optimize()\n",
+ "string_map_version.num_states()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "2cTdQj9L0xhl"
+ },
+ "source": [
+ "We see the latter possessing a significantly larger number of graph vertices. 
\n",
+ "\n",
+ "So the decision will be dependent on your ITN needs, language, concern with efficiency, and design philosophy. Further, even decisions of language dialect will have an influence. \n",
+ "(e.g. Belgian, Canadian, and Swiss dialects of French will dispense with elements of the vigesimal system for the decimal schema.)\n",
+ "\n",
+ ">**_Note:_** \n",
+ ">While `nemo_text_processing` grammars aim to minimize invalid productions, they assume input tokens are valid strings for a target language. (e.g. The mapping of \"quatre-vingt\" to `80` is permitted since it is not likely to occur in a valid French string.) "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "V1djCnvY3CjW"
+ },
+ "source": [
+ "For more information on optimization algorithms for WFSTs, please see:\n",
+ "\n",
+ "- [M. Mohri, \"Generic epsilon-removal and input epsilon-normalization algorithms for weighted transducers\"](https://cs.nyu.edu/~mohri/pub/ijfcs.pdf)\n",
+ "- [M. Mohri, \"Weighted automata algorithms\"](https://cs.nyu.edu/~mohri/pub/hwa.pdf)\n",
+ "- [K. Thompson, \"Programming techniques: regular expression search algorithm\"](http://www.oilshell.org/archive/Thompson-1968.pdf)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "### Hundreds\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "dqPUdVBbi6gU"
+ },
+ "source": [
+ "\n",
+ "Moving on to the case of three-digit cardinals (\"hundreds\"), it is likely that your chosen language becomes more regular in its schema. For instance, practically all French numbers `>100` obey the following:\n",
+ "\n",
+ "- `digit_from_1_to_9 + word_for_hundred + digit_from_1_to_99`\n",
+ "\n",
+ "For example:\n",
+ "- `203` - \"deux-cent-trois\"\n",
+ "- `530` - \"cinq-cent-trente\"\n",
+ "- `880` - \"huit-cent-quatre-vingts\"\n",
+ "\n",
+ "As such, we can write a simple `hundreds` WFST as:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "lOt-gc-FiF-X"
+ },
+ "outputs": [],
+ "source": [
+ "hundreds = graph_digits + delete_hyphen + pynutil.delete(\"cent\") + delete_hyphen + graph_all"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "Fyn1uL_NoEiz",
+ "outputId": "d491680b-1b3e-4762-8470-497833b82b0e"
+ },
+ "outputs": [],
+ "source": [
+ "apply_fst(\"deux-cent-trois\", hundreds)\n",
+ "apply_fst(\"huit-cent-quatre-vingts\", hundreds)\n",
+ "apply_fst(\"cinq-cent-trente\" , hundreds) "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "qDjq_KfnoD5C"
+ },
+ "source": [
+ "Indeed, French presents only two complications:\n",
+ "- French uses *only* the word \"cent\" for `100`. (Instead of \"un cent\".)\n",
+ "- 'Pure' multiples of a hundred (`200,300,400....`) use the plural \"cents\".\n",
+ "\n",
+ "The second one is the easier of the two, so let's start there. There are actually two options open to us. First, we could treat \"cents\" the same way as we did \"cent\" in the base case and simply delete it. From there, the lack of any following inputs will allow the WFST to insert the trailing zeroes as appropriate."
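+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "(A quick aside before the code: `pynutil.delete` accepts any FST, so deleting a union of acceptors deletes whichever spelling matches. A two-line sketch of our own, with the expected output in the comment:)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# pynutil.delete maps every string accepted by its argument to the empty string,\n",
+ "# so one deletion handles both spellings.\n",
+ "drop_cents = pynutil.delete(pynini.accep(\"cent\") | pynini.accep(\"cents\"))\n",
+ "apply_fst(\"cents\", drop_cents + pynutil.insert(\"00\"))  # expected: 00"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "With that in mind, the first option looks like this:"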
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "m2F-sumbxqLE" + }, + "outputs": [], + "source": [ + "cents = pynini.accep(\"cent\") | pynini.accep(\"cents\") # Creates a Finite State (Accep)tor, mapping inputs back to themselves\n", + "hundreds = graph_digits + delete_hyphen + pynutil.delete(cents) + delete_hyphen + graph_all" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "VisQu_Etx-QB" + }, + "source": [ + "Or we can use it as a cue to 'shortcut' the WFST to immediately insert zeroes." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "VspiTN5Vxxjl" + }, + "outputs": [], + "source": [ + "graph_cents = pynini.cross(\"cents\", \"00\") # Creates a single input-output mapping\n", + "hundreds = graph_digits + delete_hyphen + ((pynutil.delete(\"cent\") + delete_hyphen + graph_all) | graph_cents)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "meVn5BiyyX5v" + }, + "source": [ + "For the case of solitary \"cent\", we need to make sure our output is `1` only in the case that no digit precedes the occurrence. Here we need to be confident in the structure of our WFST and that any possible ambiguity has been dealt with by this point. (Something to keep in mind as we move to the thousands.)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "277Z-zLWyWAf" + }, + "outputs": [], + "source": [ + "graph_cent = pynini.cross(\"cent\", \"1\")\n", + "graph_hundreds_first_digit = (graph_digits + delete_hyphen + pynutil.delete(cents)) | graph_cent\n", + "graph_hundreds = graph_hundreds_first_digit + delete_hyphen + graph_all" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "FNZlJsvS_Yvt", + "outputId": "e85ae561-e7a1-4b6a-e394-f0194fdb89e7" + }, + "outputs": [], + "source": [ + "apply_fst(\"trois-cents\", graph_hundreds) \n", + "apply_fst(\"cent\", graph_hundreds)\n", + "apply_fst(\"cent-trois\", graph_hundreds) " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Thousands" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "e7Dy5slLzp-K" + }, + "source": [ + "For quite a few languages, managing the WFST for the thousands place is the last aspect to figure out, as the higher powers of ten reuse the same schema. (For those working with counting systems that reserve special terms for \"ten-thousand\" (e.g. Chinese derived counting systems), you may need to extend unique coverage to the next power of ten.)\n", + "\n", + "For French, the question of thousands is rather simple: `digits_from_1_to_999 + mille + digits_from_1_to_999`\n", + "\n", + "With only the exception that any expression of one thousand drops a leading digit. 
\n", + "- `1,000` -> \"mille\"\n", + "- `1,001` -> \"mille-un\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "AvsnAAiPzlu_" + }, + "outputs": [], + "source": [ + "graph_one_thousand = pynini.cross(\"mille\", \"1\")\n", + "graph_many_thousand = graph_hundreds + delete_hyphen + pynutil.delete(\"mille\")\n", + "\n", + "graph_thousands = (graph_one_thousand | graph_many_thousand) + delete_hyphen + graph_hundreds" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "i3m9TG7Y4tkl", + "outputId": "d3f1f81d-c463-4934-9df7-3b8f2b67798f" + }, + "outputs": [], + "source": [ + "apply_fst(\"cent-mille-deux-cents\", graph_thousands)\n", + "apply_fst(\"deux-cent-mille-deux-cents\", graph_thousands)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "NoevSTZGGT17" + }, + "source": [ + "### Weighting" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "A2gcVIZM0-iv" + }, + "source": [ + "Question: will this cover all our grammar so far? (Hint: what assumptions were made about \"cent\"/\"cents\"?)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "cCFtPhr1BjAc", + "outputId": "048e0d93-a4a8-4f4e-d461-bfd70e911aff" + }, + "outputs": [], + "source": [ + "apply_fst(\"deux-mille-un\", graph_thousands)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Ne-7L9Cd4t-8" + }, + "source": [ + "Once again, we need to introduce the possibility of the prior power of ten not occurring in the string. There must be an option for simply inserting a string of `0` in place of the omitted \"cent\"." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "iockqXdn-aG4" + }, + "source": [ + "Further, we want to be careful with how cavalier we have been with insertions. Consider the following:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "bxJlSnj2-Xw3", + "outputId": "6722e5ef-8a7f-43e1-84fe-b3f5f18307e1" + }, + "outputs": [], + "source": [ + "apply_fst(\"mille-cent-un\", graph_thousands) # Should be 1101\n", + "apply_fst(\"mille-cent\", graph_thousands) # 1100" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "fq5zEayA-kOx" + }, + "source": [ + "It appears that our WFST has developed a tendency to simply 'ignore' some of these higher powers. Let us return to our code for `graph_hundreds` and `graph_thousands`. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "S2aV1KQ4-1iP" + }, + "outputs": [], + "source": [ + "graph_cents = pynini.cross(\"cents\", \"00\")\n", + "graph_cent = pynini.cross(\"cent\", \"1\")\n", + "graph_hundreds_first_digit = (graph_digits + delete_hyphen + pynutil.delete(cents)) | graph_cent\n", + "graph_hundreds = (graph_hundreds_first_digit + delete_hyphen | pynutil.insert(\"0\")) + graph_all \n", + "\n", + "graph_one_thousand = pynini.cross(\"mille\", \"1\")\n", + "graph_many_thousand = graph_hundreds + delete_hyphen + pynutil.delete(\"mille\")\n", + "graph_thousands = (graph_one_thousand | graph_many_thousand) + delete_hyphen + graph_hundreds" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "9avwOIkk-9qt" + }, + "source": [ + "Recall that throughout we have provided options for simply inserting zeroes in the case of omitted numbers? 
That tendency has finally caught up with us. The use of our previous `graph_hundreds` in `graph_many_thousand` now allows our graph to insert a string of `0`'s without penalty. \n",
+ "\n",
+ "You may note that this is very similar to the \"cents\" example brought up at the beginning, and it admits a similar solution. We can control this output by making it too costly to traverse unless absolutely necessary for the graph. This can be accomplished simply by appending a weight to the insertion for hundreds:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "MQG3j0U8CUAQ"
+ },
+ "outputs": [],
+ "source": [
+ "graph_hundreds = (graph_hundreds_first_digit + delete_hyphen | pynutil.insert(\"0\", weight=.1)) + graph_all \n",
+ "\n",
+ "graph_one_thousand = pynini.cross(\"mille\", \"1\")\n",
+ "graph_many_thousand = graph_hundreds + delete_hyphen + pynutil.delete(\"mille\")\n",
+ "graph_thousands = (graph_one_thousand | graph_many_thousand) + delete_hyphen + graph_hundreds"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "KNHhrYZ7Ca58",
+ "outputId": "a7d07372-733d-4837-c1e9-1dc58ba2b87c"
+ },
+ "outputs": [],
+ "source": [
+ "apply_fst(\"mille-cent-un\", graph_thousands)\n",
+ "apply_fst(\"mille-cent\", graph_thousands)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "51yPEaf2EkbD"
+ },
+ "source": [
+ "Why choose a weight of `.1`? Quite simply: it's arbitrary. As mentioned earlier, `pynini` defaults to the tropical semiring, which uses the `min` function to select between two arcs for path traversal. Since all our paths so far have weight `0`, any positive value will ensure that the insertion is a last option during path traversal. (Note, this conversely entails that any negative-weight path will be prioritized.)\n",
+ "\n",
+ "That we chose this number as a small value comes from a place of caution: the tropical semiring adds weights along an entire path to calculate the total cost of traversing a WFST. As our grammars can easily become massive, this means that small weights can have major impact down the line. Further, by constraining path weights to small values, we can have general certainty towards the maximum weight of any individual graph, allowing us to add constraints regarding maximum token length and token hierarchy. (As explained in [later sections](#classifyweights).) As such, when using weights in a localized setting, it is best to use small values to avoid unforeseen escalation. "
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "iScKgvRxGt-B"
+ },
+ "source": [
+ "### Higher Powers\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "rtHEd6OE2WSg"
+ },
+ "source": [
+ "At this point, we can propose a general heuristic for escalating to higher powers of ten: they always need a way for their absence to be accommodated in the WFST. Further, they require some weighting to prevent this absence from developing into a string of omitted values. To avoid further bumps, we'll take care of this now with `graph_thousands`."
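+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "(Since this pattern recurs for every higher power of ten, one could capture it in a small helper. The sketch below is our own, not part of NeMo; it assumes the `graph_hundreds` and `delete_hyphen` graphs defined above, and the name `optional_power` is illustrative.)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def optional_power(power_word_fst, num_zeroes, weight=.1):\n",
+ "    \"\"\"Either consume 'hundreds + power word', or insert zeroes for an absent power.\"\"\"\n",
+ "    present = graph_hundreds + delete_hyphen + pynutil.delete(power_word_fst) + delete_hyphen\n",
+ "    absent = pynutil.insert(\"0\" * num_zeroes, weight=weight)  # weighted so it is a last resort\n",
+ "    return present | absent"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "For the thousands themselves we keep the explicit form, since the bare \"mille\" case needs its own branch:"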
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "iZMN7wcE2lH5" + }, + "outputs": [], + "source": [ + "graph_one_thousand = pynini.cross(\"mille\", \"1\")\n", + "graph_many_thousand = graph_hundreds + delete_hyphen + pynutil.delete(\"mille\")\n", + "graph_thousands = (graph_one_thousand | graph_many_thousand | pynutil.insert(\"000\", weight=.001)) + delete_hyphen + graph_hundreds" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Fkc3LIH824P7" + }, + "source": [ + "\n", + "For the rest of French (and many other languages), the remaining work is simply repeating the prior pattern around each higher power: \n", + "`hundreds + word_for_higher_power + hundreds.....` Of course there will be some variation in this schema, but the recursion should be regular. (It is rather rare for languages to reserve unique forms for these higher counts.) " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "qGnK4ARX4Nay" + }, + "source": [ + "To finish French, we can list off the following equivalents for higher powers of ten:\n", + "- `million` - \"million/millions\" \n", + "- `billion` - \"milliard/milliards\"\n", + "- `trillion` - \"billion/billions\"\n", + "\n", + "Like the \"cent/cents\" rule, these values alternate with a plural form in the case of multiples of the value. Writing them out:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "sBu7-dub4vxz" + }, + "outputs": [], + "source": [ + "millions = pynini.accep(\"million\") | pynini.accep(\"millions\")\n", + "graph_millions = ((graph_hundreds + delete_hyphen + pynutil.delete(millions) + delete_hyphen) | pynutil.insert(\"000\", weight=.1) # We need three zeroes now\n", + " ) + graph_thousands" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "LmMeCHXr5Bb5" + }, + "outputs": [], + "source": [ + "billions = pynini.accep(\"milliards\") | pynini.accep(\"milliard\")\n", + "graph_billions = ((graph_hundreds + delete_hyphen + pynutil.delete(billions) + delete_hyphen) | pynutil.insert(\"000\", weight=.1) # We need three zeroes now\n", + " ) + graph_millions" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "CIRIeQEg5B0J" + }, + "outputs": [], + "source": [ + "trillions = pynini.accep(\"billion\") | pynini.accep(\"billions\")\n", + "graph_trillions = ((graph_hundreds + delete_hyphen + pynutil.delete(trillions) + delete_hyphen) | pynutil.insert(\"000\", weight=.1) # We need three zeroes now\n", + " ) + graph_billions" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sRNUPx-15J1v" + }, + "source": [ + "Bringing it all together:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "0dLOWm_B5SwQ" + }, + "outputs": [], + "source": [ + "graph = graph_trillions | zero" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nBFE3BrN6IPR" + }, + "source": [ + "Let's try it out:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "6lWwtR1S6LI4", + "outputId": "3a6740ee-9e92-4500-c2c8-965131167e58" + }, + "outputs": [], + "source": [ + "example = \"deux-cent-milliard-quatre-million-deux-cent-quatre-vingt-onze\"\n", + "apply_fst(example, graph) " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Finishing Touches" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-w3KgX6C6mff" + }, + "source": [ + "Now 
that we have our cardinal in place, we can take care of that stylistic issue of the leading zeroes. For this, we want to develop a 'filter' that deletes all zeroes preceding the first non-zero in the string, and leaves the rest 'as is.'\n", + "\n", + "First let us create the filter by calling on `NEMO_DIGIT` - a `graph_utils` WFST that only permits digits as input. With it, we'll create a WFST that will delete all leading zeroes in a string. We then compose this (using `@`) onto our original graph, creating a new graph that accepts inputs from our original but produces only the outputs of `clean_cardinal`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 290 + }, + "id": "EA4VnRe6FO-2", + "outputId": "59e412b3-a445-4172-ee64-b0f80281a167" + }, + "outputs": [], + "source": [ + "delete_leading_zeroes = pynutil.delete(pynini.closure(\"0\")) # will delete all zeroes under closure. Equivalent to regex * operator\n", + "stop_at_non_zero = pynini.difference(NEMO_DIGIT, \"0\") # creates a graph that accepts all input-outputs from NEMO_DIGIT except 0\n", + "rest_of_cardinal = pynini.closure(NEMO_DIGIT) # accepts all digits that may follow\n", + "\n", + "clean_cardinal = delete_leading_zeroes + stop_at_non_zero + rest_of_cardinal\n", + "clean_cardinal = clean_cardinal | \"0\" # We don't want to ignore the occurrence of zero\n", + "\n", + "graph = graph @ clean_cardinal " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "piP9nqQkHpo3" + }, + "source": [ + "Now our WFST will output our numbers as normal:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "dnQ9odSpIAB7" + }, + "outputs": [], + "source": [ + "apply_fst(example, graph)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Final Notes\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "p7zt8lVsK2rY" + }, + "source": [ + "We have finally formulated a grammar that will process French cardinals into numeric representation. Of course, not every grammar you write will be for French. But several of the principles we've worked through will be invaluable in your own development. Before moving on, here's a quick summary of (almost) universal points to take away for WFST construction.\n", + "- Decide at the beginning of construction the level of constraint you wish for your grammar. Is it necessary to have a specific domain or can you rely on upstream models to narrow your input possibilities for you? \n", + "- Work iteratively upwards from the smallest place value of your numeric system. This will assist you in forming building blocks for larger values. \n", + "- Always allow for the possibility of omission of previous place values. (Not every number in the thousands will contain mention of the hundreds place.)\n", + "- For each place value, consider how the sub-grammar will affect the preceding and following place values. Are there exceptions that you've built into the grammar that may become problematic later on?\n", + "- Utilize weights for default insertions to limit path traversal to only final options. When doing so, use small values to avoid escalating problems in your larger grammar." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nvyHg1bQIIHD" + }, + "source": [ + "With that handled, we can move on to converting this grammar into a Classifier."
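+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Before we do, a quick sanity check of the zero-stripping filter in isolation (a sketch using the `apply_fst` helper from earlier cells):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "apply_fst(\"000204\", clean_cardinal) # leading zeroes are deleted: 204\n", + "apply_fst(\"0\", clean_cardinal) # a bare zero is preserved: 0"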
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "gJ1YJUvhIZwm" + }, + "source": [ + "## Classifier" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "q2L2x0crIeXQ" + }, + "source": [ + "Now that we have a grammar that will convert individual tokens into number strings, we want to focus on building it into a classifier to properly tag candidate tokens. This requires a few properties:\n", + "- It recognizes any valid token and permits traversal through the WFST graph\n", + "- Conversely, it does not allow invalid tokens to traverse the WFST graph\n", + "- It properly disambiguates overlap among ambiguous cases\n", + "- It assigns the proper attributes to a classified token\n", + "\n", + "While this seems like a lot, in practice this just means that your grammar will need a few more tweaks to improve exclusivity." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ArEYn7RWKcYI" + }, + "source": [ + "NeMo ITN performs token classification through a series of `GraphFst` classes and assumes deployment of your grammars through an object that inherits from this class. As such, you will need to instantiate your grammar as a `CardinalFst`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 368 + }, + "id": "GWgMSybqLqiS", + "outputId": "597c00ae-0f62-417f-888c-88c81c24a3fc" + }, + "outputs": [], + "source": [ + "class CardinalFst(GraphFst):\n", + " def __init__(self):\n", + " super().__init__(name=\"cardinal\", kind=\"classify\")\n", + " # Rest of the grammar here\n", + " # ....... \n", + " #........." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SIE8dNQlL52G" + }, + "source": [ + "While the naming convention may vary, the `name` and `kind` properties must be set accordingly to permit Sparrowhawk integration.\n", + "\n", + "Further, the resulting graph must produce the classified token within the following format:\n", + "`token { cardinal { integer: \"DIGIT_STRING\" } }`\n", + "\n", + "This is accomplished by a series of string insertions:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "aC_c64KSNTCg" + }, + "outputs": [], + "source": [ + "class CardinalFst(GraphFst):\n", + " def __init__(self):\n", + " super().__init__(name=\"cardinal\", kind=\"classify\")\n", + " # Rest of the grammar here\n", + " # ....... \n", + " #.........\n", + " self.fst = pynutil.insert(\"integer: \\\"\") + graph + pynutil.insert(\"\\\"\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "AGLQxOSzOK1F" + }, + "source": [ + "Followed by a call of the parent `GraphFst.add_tokens()` method:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Jz-UXFipORps" + }, + "outputs": [], + "source": [ + "class CardinalFst(GraphFst):\n", + " def __init__(self):\n", + " super().__init__(name=\"cardinal\", kind=\"classify\")\n", + " # Rest of the grammar here\n", + " # ....... \n", + " #.........\n", + " self.fst = pynutil.insert(\"integer: \\\"\") + graph + pynutil.insert(\"\\\"\")\n", + " final_graph = self.add_tokens(self.fst)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "gh23S7BHOY0r" + }, + "source": [ + "Which will insert the appropriate formatting. 
Note that this formatting must be exact: a single space must follow each field name and each value must be within escaped double quotes.\n", + "\n", + "In the event that you also wish for `CardinalFst` to indicate negative values, the optional `negative: ` property may be used.\n", + "\n", + "For instance, French indicates negative values by prefacing the quantity with \"moins.\" As such:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "3JbTn35cOx0k" + }, + "outputs": [], + "source": [ + "optional_minus_graph = pynini.closure(\n", + " pynutil.insert(\"negative: \") + pynini.cross(\"moins\", \"\\\"-\\\"\") + \" \", 0, 1 # Note the extra space to separate the value from the integer field\n", + ")\n", + "\n", + "final_graph = optional_minus_graph + pynutil.insert(\"integer: \\\"\") + graph + pynutil.insert(\"\\\"\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DCs1048v6N0K" + }, + "source": [ + "All together, your `CardinalFst` ultimately serves as a wrapper for your grammar, save for the addition of a few insertions to assist processing:\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "eo6uEz1s5TJY" + }, + "outputs": [], + "source": [ + "class CardinalFst(GraphFst):\n", + " def __init__(self):\n", + " super().__init__(name=\"cardinal\", kind=\"classify\")\n", + " \n", + " ### Cardinal Grammar....\n", + " ### .....\n", + " graph = graph_trillions | zero \n", + "\n", + " ### Formatting grammar....\n", + " ### .....\n", + " graph = graph @ clean_cardinal\n", + "\n", + " ### Token insertion\n", + " optional_minus_graph = pynini.closure(\n", + " pynutil.insert(\"negative: \") + pynini.cross(\"moins\", \"\\\"-\\\"\") + \" \", 0, 1\n", + " )\n", + "\n", + " final_graph = optional_minus_graph + pynutil.insert(\"integer: \\\"\") + graph + pynutil.insert(\"\\\"\")\n", + "\n", + " final_graph = self.add_tokens(final_graph) # inserts the cardinal tag\n", + "\n", + " self.fst = final_graph" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "MFIMdLCoZzLK" + }, + "source": [ + "Let's see a demonstration. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "4CF6Iz9NZ7R_" + }, + "outputs": [], + "source": [ + "cardinal = CardinalFst().fst\n", + "\n", + "example = \"moins deux-cent-quatre\"\n", + "\n", + "apply_fst(example, cardinal)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Verbalizer" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "uvUqpC_Q8FSt" + }, + "source": [ + "The verbalizer can be both the most crucial and simplest part of building each grammar. On one hand, it is the component that finalizes all of your previous work. If it is unable to properly normalize your text, everything has been for naught.\n", + "\n", + "On the other hand, your previous work has vastly limited the unpredictability of your input. Recall from our initial demonstration of the classifier-verbalizer system that an input like <> becomes:\n", + "\n", + "- `tokens { name: \"le\" }`\n", + "- `tokens { date { day: \"1\" month: \"juillet\" } }` \n", + "- `tokens { name: \"il\" }` \n", + "- `tokens { name: \"a\" }` \n", + "- `tokens { name: \"mangé\" }`\n", + "- `tokens { cardinal { integer: \"35\" } }` \n", + "- `tokens { name: \"pommes\" }`\n", + "\n", + "Part of the purpose of the two-stage setup is that the input space for each verbalizer is obvious: it's simply the name of its semiotic class. 
As such, we only need to write our grammar to recognize its class, remove tokens accordingly, and then manage the attributes of each semiotic token." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "et1GgmBuAWzY" + }, + "source": [ + "We will begin as we did with our classifier and create a class to inherit from the `GraphFst` utility class:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "NNKpgWtkAgEW" + }, + "outputs": [], + "source": [ + "class CardinalFst(GraphFst):\n", + " def __init__(self):\n", + " super().__init__(name=\"cardinal\", kind=\"verbalize\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "OyAV39NsAqSN" + }, + "source": [ + "One of the useful aspects of the `GraphFst` utility is that it already possesses a built-in graph that will recognize and remove semiotic tokens: `delete_tokens`. As such, we need only concern ourselves with managing the properties of the Cardinal class:\n", + "- `integer`\n", + "- `negative`\n", + "\n", + "Here, the desired written format of your chosen language will dictate how you proceed. For French, we have the following rules for Cardinal numbers:\n", + "- A negative sign is written before the numeral.\n", + "- Cardinal numbers representing quantities (e.g. \"mille euros\"/ \"one thousand dollars\") are written with spaces in-between every three digits. (e.g. `1 000`)\n", + "- Cardinal numbers representing place in a sequence or addresses (\"page mille\"/\"page one thousand\") are written without spacing. (`1000`)\n", + "\n", + "The first property seems easy enough to handle: write a grammar that simply removes the `negative` formatting, leaving only `-`. (Recall that our Classifier only inserted the string if it was present.) \n", + "\n", + "For the final two, we may note that our intention to develop WFSTs for the Decimal, Measure, and Money classes will already cover most desired quantities. As such, we can leave the issue of spacing to those instances and let the Cardinal WFST default to the non-spacing case. (Note that this will be helpful with Time, Date, Telephone, Electronic, and Ordinal classes as they will not use the spacing format either. It is usually better to reserve specific formatting rules to other classes and let the Cardinal serve as a default.)\n", + "\n", + "As such, we just need our WFST to remove the `integer` property and the `negative` property (if it occurs). 
These can be managed through the `pynutil.delete` function, as seen in the following:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 368 + }, + "id": "6MF2I6SLU7nf", + "outputId": "0437c4af-5c96-4122-8af0-ca37723c7228" + }, + "outputs": [], + "source": [ + "class CardinalFst(GraphFst):\n", + " def __init__(self):\n", + " super().__init__(name=\"cardinal\", kind=\"verbalize\")\n", + " \n", + " # Removes the negative attribute and leaves the sign if it occurs\n", + " optional_sign = pynini.closure(\n", + " pynutil.delete(\"negative:\")\n", + " + delete_space\n", + " + pynutil.delete(\"\\\"\")\n", + " + pynini.accep(\"-\")\n", + " + pynutil.delete(\"\\\"\")\n", + " + delete_space,\n", + " 0,\n", + " 1,\n", + " )\n", + " \n", + " # removes integer aspect\n", + " graph = (\n", + " pynutil.delete(\"integer:\")\n", + " + delete_space\n", + " + pynutil.delete(\"\\\"\")\n", + " + pynini.closure(NEMO_DIGIT, 1) # Accepts at least one digit\n", + " + pynutil.delete(\"\\\"\")\n", + " )\n", + " \n", + " graph = optional_sign + graph # concatenates two properties\n", + "\n", + " delete_tokens = self.delete_tokens(graph) # removes semiotic class tag\n", + "\n", + " self.fst = delete_tokens.optimize()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "QSX2KlZJbRAA" + }, + "source": [ + "Let's see if it will properly render a given token:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "JxaLm2k0bYIJ" + }, + "outputs": [], + "source": [ + "cardinal = CardinalFst().fst\n", + "example = 'cardinal { negative: \"-\" integer: \"204\" }'\n", + "\n", + "apply_fst(example, cardinal)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Bc0-QCBHWg-8" + }, + "source": [ + "That's it! We've now completed all aspects of our `CardinalFst` from grammar writing to Verbalization. While we still have quite a few semiotic classes left, you will find that they build off the `CardinalFst` quite easily, making progression much simpler and more straightforward.\n", + "\n", + ">**_Note:_**\n", + ">- `delete_tokens` is called on the completed graph, despite the token class occurring first in the tokenized string. This is because the function intersects with an initial WFST that deletes the tags. As such, the function must be passed a completed graph.\n", + ">- In our initial example, all tokens were enclosed within a `token` category. Insertion and deletion of this category is managed by the main [Classifier](#tokenize-and-classify) and [Verbalizer](#verbalize-and-verbalize-final) respectively and is not a concern during individual class grammar development.\n", + ">- Earlier in the tutorial we noted that NeMo ITN permutes all WFSTs unless the `preserve_order` tag is passed as part of the Classifier. This allows you to ignore possible variation in designing the verbalizer and focus on whatever form of processing is easiest for the grammar. That is, the decision to process the `negative` property before the `integer` property is not a consequence of the French language, but is instead chosen because it is easier to write out with `pynini`. \n", + ">- Conversely, if your language is completely invariant in this regard, it may be more efficient to pass `preserve_order` through the Classifier and manage the property here in the Verbalizer. This allows NeMo ITN to avoid building states and arcs for each permutation, reducing graph size and compiling time."
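+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As a last check, the two stages can be chained directly: the Classifier's output is the Verbalizer's input. A minimal sketch, where `tagger` and `verbalizer` are hypothetical bindings to the `.fst` of the classify and verbalize versions of `CardinalFst` above (the two classes share a name, so bind each before redefining):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# 'tagger' = the kind=\"classify\" CardinalFst's .fst; 'verbalizer' = the kind=\"verbalize\" one\n", + "tagged = 'cardinal { negative: \"-\" integer: \"204\" }' # tagger output for \"moins deux-cent-quatre\"\n", + "\n", + "apply_fst(\"moins deux-cent-quatre\", tagger)\n", + "apply_fst(tagged, verbalizer) # -> -204"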
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "aFUrbSdJ8Wk7" + }, + "source": [ + "# Ordinal WFST " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "w1b0Z7f5Z9Ar" + }, + "source": [ + "Ordinals are the class of numbers used for enumerating order or placement of entities in a series. In some languages, they are simply derivations of cardinal numbers. For instance, English enumerates order as `first, second, third, fourth, fifth....` After the third ordinal, they follow a regular pattern of `cardinal + 'th'`.\n", + "\n", + "Meanwhile, other languages may reserve specific counting systems for ordinals. For example, while Korean uses a Chinese-derived counting system for several Cardinal-related tasks, it uses derivations from a native counting system for ordering:\n", + "\n", + "**Cardinal**/**Ordinal** = **English**\n", + "- il/cheot-jae = \"First\"\n", + "- i/dul-jae = \"Second\"\n", + "- sam/set-jae = \"Third\"\n", + "- sa/net-jae = \"Fourth\"\n", + "- o/daseot-jae = \"Fifth\"\n", + "\n", + "If your language is of the latter variety, you will likely need to begin development of the Ordinal WFST by repeating Cardinal WFST development before proceeding. (Or make it part of your previous Cardinal WFST and combine it with a `union` operation.) While you can extend coverage to the level of the Cardinal WFST, you will find most Ordinals to be sufficiently covered by only enumerating to a few hundreds. (e.g. Is it common in your language to speak of the \"one millionth\" in an order and/or write out `1,000,000th`?)\n", + "\n", + "For this portion of the tutorial, we will focus on the first type of ordinals - those that are primarily derived by altering Cardinals." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "oq_xA8NPiANw" + }, + "source": [ + "## Grammar" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "lhjcQS6oiD_w" + }, + "source": [ + "Continuing with our example language, we first begin by laying out our expected inputs and pinpointing a regular pattern to guide our WFSTs. We note the following examples:\n", + "\n", + " **English = French**\n", + " - \"first\" = \"premier/première\"\n", + " - \"second\" = \"second/seconde/deuxième\"\n", + " - \"third\" = \"troisième\"\n", + " - \"fourth\" = \"quatrième\"\n", + " - \"fifth\" = \"cinquième\"\n", + " - \"sixth\" = \"sixième\"\n", + " - \"seventh\" = \"septième\"\n", + "\n", + "From our example inputs, it appears that spelling of French Ordinals follows a general format of: `cardinal + ième`. The only exceptions appear to be in the case of the first and second Ordinals - for which completely different roots appear - and the fourth and the fifth Ordinals - where the former drops the \"e\" at the end of the root (`quatre -> quatr`) and the latter appends a \"u\" (`cinq -> cinqu`). \n", + "\n", + "For the expected outputs, we observe the following examples:\n", + " - \"premier/première\" -> `1ᵉʳ/1ʳᵉ`\n", + " - \"second/seconde\" -> `2ᵈ/2ᵈᵉ`\n", + " - \"deuxième\" -> `2ᵉ`\n", + " - \"troisième\" -> `3ᵉ`\n", + " - \"quatrième\" -> `4ᵉ`\n", + " - \"cinquième\" -> `5ᵉ`\n", + " - \"sixième\" -> `6ᵉ`\n", + " - \"septième\" -> `7ᵉ`\n", + "\n", + "It appears that the output is simply the cardinal number of the root with an associated superscript. 
Since we have already constructed the Cardinal WFST, this means that the job of constructing an Ordinal WFST is simply a case of recognizing the cardinal root for the input and then utilizing a preconstructed Cardinal grammar to render the proper form alongside an associated superscript. That is, our tasks are to:\n", + "- Identify the proper superscript for the ordinal\n", + "- Change the ordinal back into a cardinal\n", + "- Use the Cardinal WFST to transform the cardinal into normalized form\n", + "- Properly render the ordinal using the normalized cardinal and proper superscript\n", + "\n", + "As information regarding the superscript will need to be conveyed through development of the Classifier, we will begin with creating the grammar necessary for rendering the ordinal as its cardinal root. \n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "AOUVZhiwT7hE" + }, + "source": [ + "### Stripping Suffixes" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5nw0_lOTsEik" + }, + "source": [ + "Since French forms Ordinals by appending a suffix to Cardinals, we should start by creating a WFST to remove the suffix. Assuming that our grammar processes one token at a time, this means that we just need a WFST that will accept all tokens that end with \"ième\" and then delete the suffix from that token:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Rk89LhsxsHTO" + }, + "outputs": [], + "source": [ + "strip_morpheme = pynutil.delete(\"ième\") # deletes suffix\n", + "graph_strip_morpheme = NEMO_SIGMA + strip_morpheme # accepts any string, then deletes the trailing suffix" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "pLg-PzdntV4N" + }, + "source": [ + "Now we can create a graph that permits all characters in a word token and deletes the ordinal suffix. (Note that this also means that the graph won't accept tokens without the suffix, helping us avoid false inputs.) \n", + "\n", + "We can now compose this graph with our Cardinal WFST to strip the suffixes from ordinals and treat them as cardinals. However, recall that our `CardinalFst` also inserted its own class tag. Obviously, we do not want to do this here as it will disrupt the formatting of the token. Instead, we should create a new subgraph *within* the `CardinalFst` class that will only produce the cardinals without the token formatting."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class CardinalFst(GraphFst):\n", + " def __init__(self):\n", + " super().__init__(name=\"cardinal\", kind=\"classify\")\n", + " \n", + " ### Cardinal Grammar....\n", + " ### .....\n", + " graph = graph_trillions | zero \n", + "\n", + " ### Formatting grammar....\n", + " ### .....\n", + " graph = graph @ clean_cardinal\n", + " \n", + " ### NEW GRAPH\n", + " self.just_cardinals = graph # will produce cardinals without formatting\n", + "\n", + " ### Token insertion\n", + " optional_minus_graph = pynini.closure(\n", + " pynutil.insert(\"negative: \") + pynini.cross(\"moins\", \"\\\"-\\\"\") + \" \", 0, 1\n", + " )\n", + "\n", + " final_graph = optional_minus_graph + pynutil.insert(\"integer: \\\"\") + graph + pynutil.insert(\"\\\"\")\n", + "\n", + " final_graph = self.add_tokens(final_graph)\n", + "\n", + " self.fst = final_graph" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we call it for our graph:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "vxDgBa4_t1nD" + }, + "outputs": [], + "source": [ + "graph_cardinal = CardinalFst().just_cardinals \n", + "graph_ordinal_regular_suffix = graph_strip_morpheme @ graph_cardinal" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "hSpk5M7BuXRz" + }, + "source": [ + "Let's see if it works and gives us the desired cardinal:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "7cJ7fieouY2r" + }, + "outputs": [], + "source": [ + "example = \"sixième\" # derived from six/6\n", + "apply_fst(example, graph_ordinal_regular_suffix)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "GtEuV7sOuxek" + }, + "source": [ + "Now we can consider the edge cases. Beyond the first and second ordinals, French exhibits irregular behavior in the following cases:\n", + "- If the cardinal root ends with an \"e\", the \"e\" is dropped before adding the suffix (e.g. \"quatrième\"). \n", + "- Cardinals ending with \"cinq\" and \"neuf\" change their endings to \"cinqu\" and \"neuv\" before appending the suffix, respectively. \n", + "\n", + "We could start by proposing a WFST that replaces the suffix \"ième\" with \"e\" and then compose this onto the Cardinal WFST. If it is a legitimate cardinal, then there will be a path through the CardinalFst and the integer will be rendered as normal. \n", + "\n", + "Meanwhile, the cases of \"cinq\" and \"neuf\" would each require a distinct WFST, as they are each a consequence of different rules of orthography and phonology. Like the case with \"e\", we could change each back to its root and then see if the Cardinal WFST will permit a path with the new input. 
\n", + "\n", + "It is at this point that we can do a cost-benefit analysis and realize that all these cases can be managed by an explicit `string_map/string_file`:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "_9KTNQeIw4sq" + }, + "outputs": [], + "source": [ + "graph_root_change = pynini.string_map([(\"quatrième\", \"quatre\"),\n", + " (\"cinquième\",\t\"cinq\"),\n", + " (\"neuvième\",\t\"neuf\"),\n", + " (\"onzième\",\t\"onze\"),\n", + " (\"douzième\",\t\"douze\"),\n", + " (\"treizième\",\t\"treize\"),\n", + " (\"quatorzième\",\t\"quatorze\"),\n", + " (\"quinzième\",\t\"quinze\"),\n", + " (\"seizième\",\t\"seize\"),\n", + " (\"trentième\",\t\"trente\"),\n", + " (\"quarantième\",\t\"quarante\"),\n", + " (\"cinquantième\",\t\"cinquante\"),\n", + " (\"soixantième\",\t\"soixante\"),\n", + " (\"millième\",\t\"mille\"),\n", + "])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "eo2_keFVqaY4" + }, + "source": [ + "We could then concatenate these with a WFST that accepts all tokens with these endings and then change the endings as desired. These will provide the cardinal roots just as effectively. " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "O7I29ezmxylx" + }, + "source": [ + "The same can be said for \"premier/première\" and \"second/seconde\":" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "3JZoz51VyGS6" + }, + "outputs": [], + "source": [ + "graph_firsts = pynini.string_map([(\"premier\", \"un\"),(\"première\", \"un\")])\n", + "graph_seconds = pynini.string_map([(\"second\", \"deux\"),(\"seconde\", \"deux\")])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "NJ9BGGAwyTQ5" + }, + "source": [ + "*Note: We graph separately to manage their different superscripts later on.*\n", + "\n", + "Depending on your language of focus, the choice of implicitly reversing the root token or explicitly mapping back to root will be the most efficient, but it is worth considering both options if only to check your understanding of the language." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8PgVwDRRq9gr" + }, + "source": [ + "Putting our grammar together, we have:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ko2kAeKwrRSH" + }, + "outputs": [], + "source": [ + "strip_morpheme = pynutil.delete(\"ième\") # deletes suffix\n", + "\n", + "graph_root_change = pynini.string_map([(\"quatrième\", \"quatre\"),\n", + " (\"cinquième\",\t\"cinq\"),\n", + " (\"neuvième\",\t\"neuf\"),\n", + " (\"onzième\",\t\"onze\"),\n", + " (\"douzième\",\t\"douze\"),\n", + " (\"treizième\",\t\"treize\"),\n", + " (\"quatorzième\",\t\"quatorze\"),\n", + " (\"quinzième\",\t\"quinze\"),\n", + " (\"seizième\",\t\"seize\"),\n", + " (\"trentième\",\t\"trente\"),\n", + " (\"quarantième\",\t\"quarante\"),\n", + " (\"cinquantième\",\t\"cinquante\"),\n", + " (\"soixantième\",\t\"soixante\"),\n", + " (\"millième\",\t\"mille\"),\n", + "])\n", + "\n", + "# Component will accept all tokens that end with desired strings\n", + "graph_get_cardinal = NEMO_SIGMA + (strip_morpheme | graph_root_change) \n", + "\n", + "graph_firsts = pynini.string_map([(\"premier\", \"un\"),(\"première\", \"un\")])\n", + "graph_seconds = pynini.string_map([(\"second\", \"deux\"),(\"seconde\", \"deux\")])\n", + "\n", + "graph_get_cardinal = pynini.union(graph_firsts, graph_seconds, graph_get_cardinal) \n", + "\n", + "graph_cardinal = CardinalFst().just_cardinals\n", + "\n", + "graph_ordinal = graph_get_cardinal @ graph_cardinal" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ESxY3LsCdE8q" + }, + "outputs": [], + "source": [ + "apply_fst(\"sixième\", graph_ordinal)\n", + "apply_fst(\"première\", graph_ordinal)\n", + "apply_fst(\"seconde\", graph_ordinal)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "qo_g8UdoUFJB" + }, + "source": [ + "## Classifier" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "kemhdKAjzEIa" + }, + "source": [ + "Now that we've found a way to pass the work of the Ordinal grammar back onto the Cardinal grammar, we can move on to the Classifier. Like before, we need to inherit from `GraphFst` to properly insert token formatting and required attributes. As well, we will again use the `integer` property to tag our digit string.\n", + "\n", + "Indeed, the only major difference between the Ordinal Classifier and the Cardinal Classifier is the replacement of the optional `negative` attribute with the `morphosyntactic_features` attribute to indicate the superscript function." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "EHM4Y3TW2nXT" + }, + "source": [ + "Since we are relying on the `CardinalFst` class in our grammar, we want to consider how to instantiate it. As our ultimate goal is to build a Classifier that unites all semiotic classes, it makes sense to simply use the `CardinalFst` that we will need to call for our ITN and pass it as an argument to our new class."
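+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Concretely, the wiring we are aiming for is sketched below (the class itself is completed over the next few cells):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cardinal = CardinalFst() # the cardinal tagger, built once\n", + "ordinal = OrdinalFst(cardinal) # reuses the cardinal grammar internally"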
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 273 + }, + "id": "KsmPhWSa3LF_", + "outputId": "9e881ca9-a926-4249-dda8-9c52175569b5" + }, + "outputs": [], + "source": [ + "def __init__(self, cardinal: GraphFst):\n", + " super().__init__(name=\"ordinal\", kind=\"classify\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "CtBQ-udB3S5Q" + }, + "source": [ + "To clear up the namespace, we will now be importing from the NeMo implementation of `CardinalFst` for French." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "L-JAcidf4QQg" + }, + "outputs": [], + "source": [ + "from nemo_text_processing.inverse_text_normalization.fr.taggers.cardinal import CardinalFst\n", + "\n", + "class OrdinalFst(GraphFst):\n", + " def __init__(self, cardinal: GraphFst):\n", + " super().__init__(name=\"ordinal\", kind=\"classify\")\n", + " graph_cardinal = cardinal.graph_no_exception # NeMo equivalent to self.just_cardinals" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "FQfkAqZavCAB" + }, + "source": [ + "We now add in our grammar:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "uUQ4BLuivGut" + }, + "outputs": [], + "source": [ + "class OrdinalFst(GraphFst):\n", + " def __init__(self, cardinal: GraphFst):\n", + " super().__init__(name=\"ordinal\", kind=\"classify\")\n", + " graph_cardinal = cardinal.graph_no_exception # may replace\n", + "\n", + " strip_morpheme = pynutil.delete(\"ième\") # deletes suffix\n", + "\n", + " graph_root_change = pynini.string_map([(\"quatrième\", \"quatre\"),\n", + " (\"cinquième\",\t\"cinq\"),\n", + " (\"neuvième\",\t\"neuf\"),\n", + " (\"onzième\",\t\"onze\"),\n", + " (\"douzième\",\t\"douze\"),\n", + " (\"treizième\",\t\"treize\"),\n", + " (\"quatorzième\",\t\"quatorze\"),\n", + " (\"quinzième\",\t\"quinze\"),\n", + " (\"seizième\",\t\"seize\"),\n", + " (\"trentième\",\t\"trente\"),\n", + " (\"quarantième\",\t\"quarante\"),\n", + " (\"cinquantième\",\t\"cinquante\"),\n", + " (\"soixantième\",\t\"soixante\"),\n", + " (\"millième\",\t\"mille\"),\n", + " ])\n", + " \n", + " # Component will accept all tokens that end with desired strings\n", + " graph_get_cardinal = NEMO_SIGMA + (strip_morpheme | graph_root_change) \n", + "\n", + " graph_firsts = pynini.string_map([(\"premier\", \"un\"),(\"première\", \"un\")])\n", + " graph_seconds = pynini.string_map([(\"second\", \"deux\"),(\"seconde\", \"deux\")])\n", + "\n", + " graph_get_cardinal = pynini.union(graph_firsts, graph_seconds, graph_get_cardinal) \n", + "\n", + " graph_ordinal = graph_get_cardinal @ graph_cardinal\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "F_6EXPRMvnp2" + }, + "source": [ + "Now we come to the `morphosyntactic_features` property - a linguistic term for aspects of a word related to grammar. If intending to deploy your WFST through Sparrowhawk, this is the only ordinal property that is permitted (outside of the universal properties like `preserve_order`) and thus must carry all information regarding how to properly normalize the ordinal. (If Sparrowhawk deployment is not necessary, you may add additional properties to the tag.)\n", + "\n", + "How should we convey this information? Since the Verbalizer will be the main interface for our tags, it really does not matter - so long as we can reliably process the features. 
For the purposes of French, we just need `morphosyntactic_features` to decide the following:\n", + "- Insert the specific superscripts for \"premier/première\" or \"second/seconde\"\n", + "- Insert \"ᵉ\" otherwise\n", + "\n", + "We will also introduce another aspect of French Ordinals: they can be either plural or singular, identified by the suffix \"s\" on input and superscript \"ˢ\" on output. As such, our `morphosyntactic_features` should also decide the additional property:\n", + "- Insert the plural superscript " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "atctz6p-2GtV" + }, + "source": [ + "Since the default superscript is near-universal, we will just specify this in our WFST and focus on the first and second ordinals as specific cases. We will create a `graph_morpheme` component that inserts the default superscript - indicated with a standard \"e\" to avoid possible encoding issues. We will then append a WFST that will graph any possible plural marker - \"s\" - as part of the `morphosyntactic_features`. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ui99osyP2UuQ" + }, + "outputs": [], + "source": [ + "graph_morpheme = pynutil.insert(\"e\") # Insert e superscript\n", + "graph_plural = pynini.closure(pynini.accep(\"s\"), 0, 1) # We create an acceptor since we must process the possible \"s\"\n", + "\n", + "graph_morpheme_component = graph_morpheme + graph_plural\n", + "\n", + "graph_morphosyntactic_features = (pynutil.insert(\" morphosyntactic_features: \\\"\") \n", + " + graph_morpheme_component\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "QAlqubA25gq0" + }, + "source": [ + "Introducing the `integer` feature:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "rs2TyIBc5la6" + }, + "outputs": [], + "source": [ + "graph_reg_ordinals = graph_get_cardinal @ graph_cardinal # Regular ordinals only; the first and second ordinals are handled separately.\n", + "\n", + "graph_ordinal = pynutil.insert(\"integer: \\\"\") + graph_reg_ordinals + pynutil.insert(\"\\\"\")\n", + "graph_ordinal += graph_morphosyntactic_features" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xoqk20Pi2gT8" + }, + "source": [ + "For the first and second ordinals, we can explicitly state their mappings, as these occurrences are invariable. (First and second ordinals do not need to accommodate being the endings of other terms.) As such, we can just have mappings from the token to the superscripts."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "54aqdH_P63Ea" + }, + "outputs": [], + "source": [ + "firsts = pynini.string_map([(\"premier\", \"er\"), (\"première\",\"re\")])\n", + "firsts += graph_plural # Still accepts plural marker in superscript\n", + "seconds = pynini.string_map([(\"second\", \"d\"),(\"seconde\", \"de\")])\n", + "seconds += graph_plural \n", + "\n", + "graph_firsts = pynutil.insert(\"integer: \\\"1\\\" morphosyntactic_features: \\\"\") + firsts\n", + "graph_seconds = pynutil.insert(\"integer: \\\"2\\\" morphosyntactic_features: \\\"\") + seconds" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "D2vQ4m7o7p84" + }, + "source": [ + "Placing them in our class:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "w_JKT8JMf-Mz" + }, + "outputs": [], + "source": [ + "class OrdinalFst(GraphFst):\n", + " def __init__(self, cardinal: GraphFst):\n", + " super().__init__(name=\"ordinal\", kind=\"classify\")\n", + " graph_cardinal = cardinal.graph_no_exception # may replace\n", + "\n", + " strip_morpheme = pynutil.delete(\"ième\") # deletes suffix\n", + "\n", + " graph_root_change = pynini.string_map([(\"quatrième\", \"quatre\"),\n", + " (\"cinquième\",\t\"cinq\"),\n", + " (\"neuvième\",\t\"neuf\"),\n", + " (\"onzième\",\t\"onze\"),\n", + " (\"douzième\",\t\"douze\"),\n", + " (\"treizième\",\t\"treize\"),\n", + " (\"quatorzième\",\t\"quatorze\"),\n", + " (\"quinzième\",\t\"quinze\"),\n", + " (\"seizième\",\t\"seize\"),\n", + " (\"trentième\",\t\"trente\"),\n", + " (\"quarantième\",\t\"quarante\"),\n", + " (\"cinquantième\",\t\"cinquante\"),\n", + " (\"soixantième\",\t\"soixante\"),\n", + " (\"millième\",\t\"mille\"),\n", + " ])\n", + " \n", + " # Component will accept all tokens that end with desired strings\n", + " graph_get_cardinal = NEMO_SIGMA + (strip_morpheme | graph_root_change) \n", + "\n", + " # Graph will map ordinals beyond second ordinal to their cardinals\n", + " graph_reg_ordinals = graph_get_cardinal @ graph_cardinal\n", + "\n", + " # Graphing morphosyntactic_features\n", + " graph_morpheme = pynutil.insert(\"e\") # Insert e superscript\n", + " graph_plural = pynini.accep(\"s\").ques # ques is equivalent to pynini.closure(, 0, 1)\n", + "\n", + " graph_morpheme_component = graph_morpheme + graph_plural\n", + "\n", + " graph_morphosyntactic_features = (pynutil.insert(\" morphosyntactic_features: \\\"\") \n", + " + graph_morpheme_component\n", + " )\n", + "\n", + " # Adding in the `integer` property:\n", + " graph_ordinal = pynutil.insert(\"integer: \\\"\") + graph_reg_ordinals + pynutil.insert(\"\\\"\")\n", + " graph_ordinal += graph_morphosyntactic_features \n", + "\n", + " # Case of first and second ordinals\n", + " firsts = pynini.string_map([(\"premier\", \"er\"), (\"première\",\"re\")])\n", + " firsts += graph_plural # Still accepts plural marker in superscript\n", + " seconds = pynini.string_map([(\"second\", \"d\"),(\"seconde\", \"de\")])\n", + " seconds += graph_plural \n", + "\n", + " graph_firsts = pynutil.insert(\"integer: \\\"1\\\" morphosyntactic_features: \\\"\") + firsts\n", + " graph_seconds = pynutil.insert(\"integer: \\\"2\\\" morphosyntactic_features: \\\"\") + seconds\n", + "\n", + " # All together\n", + " graph_ordinal = pynini.union(graph_ordinal, graph_firsts, graph_seconds)\n", + " self.fst = graph_ordinal.optimize()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "CpGHVg6chmA0" + }, + "source": [ + "Trying out on some examples:" + ] + 
}, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "b5DL3PZRhpc8" + }, + "outputs": [], + "source": [ + "cardinal = CardinalFst()\n", + "ordinal = OrdinalFst(cardinal).fst\n", + "\n", + "apply_fst(\"premier\", ordinal)\n", + "apply_fst(\"premiers\", ordinal)\n", + "apply_fst(\"seconde\", ordinal)\n", + "apply_fst(\"douzièmes\", ordinal)\n", + "apply_fst(\"cent-cinquièmes\", ordinal)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "MNQVgiv-UK29" + }, + "source": [ + "### Special Tokens" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "UdiNAHGh71O9" + }, + "source": [ + "If you are particularly astute, you may have noticed that we have not closed the quotations around the `morphosyntactic_features` throughout, despite doing so for `integer`. This is not a typo, as there is one more aspect of the Classifier that must be addressed: special cases.\n", + "\n", + "For your language, you may notice that there are occasional exceptions to writing rules that are signaled by a specific vocabulary token in a string. As this must be communicated to our Verbalizer, it is important that we signal this vocabulary through our Classifier. \n", + "\n", + "For French, this can occur in the normalization of centuries. When using Ordinals to indicate centuries, French commonly writes with Roman numerals. For example:\n", + "- \"Fifth century\" -> \"cinquième siècle\" -> `Vᵉ siècle` \n", + "- \"Twentieth century\" -> \"vingtième siècle\" -> `XXᵉ siècle` \n", + "\n", + "As such, we must allow our Classifier to pass on the information that \"siècle\" follows an ordinal to our Verbalizer, so it may normalize with Roman numerals. We accomplish this by appending a WFST that accepts special tokens that follow our Ordinals, adding them to our `morphosyntactic_features` attribute with a forward slash to delineate." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "MsWnT4BfQKcC" + }, + "outputs": [], + "source": [ + "special_tokens = pynini.accep(\"siècle\")\n", + "\n", + "graph_special_tokens = delete_space + pynutil.insert(\"/\") + special_tokens # We need to delete the space between the ordinal and the key-word.\n", + "graph_special_tokens = pynini.closure(graph_special_tokens, 0, 1)\n", + "\n", + "graph_ordinal += graph_special_tokens + pynutil.insert(\"\\\"\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "698_n5SFQ_jP" + }, + "source": [ + "*Once again, it is advised to retain a tsv file in `data` to quickly append these key-words.*\n", + "\n", + "Having taken care of the special case, we may now call `add_tokens` and complete the graph (fully written out below)."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "nZ1dkft0Riou" + }, + "outputs": [], + "source": [ + "class OrdinalFst(GraphFst):\n", + " def __init__(self, cardinal: GraphFst):\n", + " super().__init__(name=\"ordinal\", kind=\"classify\")\n", + " graph_cardinal = cardinal.graph_no_exception # may replace\n", + "\n", + " strip_morpheme = pynutil.delete(\"ième\") # deletes suffix\n", + "\n", + " graph_root_change = pynini.string_map([(\"quatrième\", \"quatre\"),\n", + " (\"cinquième\",\t\"cinq\"),\n", + " (\"neuvième\",\t\"neuf\"),\n", + " (\"onzième\",\t\"onze\"),\n", + " (\"douzième\",\t\"douze\"),\n", + " (\"treizième\",\t\"treize\"),\n", + " (\"quatorzième\",\t\"quatorze\"),\n", + " (\"quinzième\",\t\"quinze\"),\n", + " (\"seizième\",\t\"seize\"),\n", + " (\"trentième\",\t\"trente\"),\n", + " (\"quarantième\",\t\"quarante\"),\n", + " (\"cinquantième\",\t\"cinquante\"),\n", + " (\"soixantième\",\t\"soixante\"),\n", + " (\"millième\",\t\"mille\"),\n", + " ])\n", + " \n", + " # Component will accept all tokens that end with desired strings\n", + " graph_get_cardinal = NEMO_SIGMA + (strip_morpheme | graph_root_change) \n", + "\n", + " # Graph will map ordinals beyond second ordinal to their cardinals\n", + " graph_reg_ordinals = graph_get_cardinal @ graph_cardinal\n", + "\n", + " # Graphing morphosyntactic_features\n", + " graph_morpheme = pynutil.insert(\"e\") # Insert e superscript\n", + " graph_plural = pynini.accep(\"s\").ques # We create an acceptor since we must process the possible \"s\"\n", + "\n", + " graph_morpheme_component = graph_morpheme + graph_plural\n", + "\n", + " graph_morphosyntactic_features = (pynutil.insert(\" morphosyntactic_features: \\\"\") \n", + " + graph_morpheme_component\n", + " )\n", + "\n", + " # Adding in the `integer` property:\n", + " graph_ordinal = pynutil.insert(\"integer: \\\"\") + graph_reg_ordinals + pynutil.insert(\"\\\"\")\n", + " graph_ordinal += graph_morphosyntactic_features \n", + "\n", + " # Case of first and second ordinals\n", + " firsts = pynini.string_map([(\"premier\", \"er\"), (\"première\",\"re\")])\n", + " firsts += graph_plural # Still accepts plural marker in superscript\n", + " seconds = pynini.string_map([(\"second\", \"d\"),(\"seconde\", \"de\")])\n", + " seconds += graph_plural \n", + "\n", + " graph_firsts = pynutil.insert(\"integer: \\\"1\\\" morphosyntactic_features: \\\"\") + firsts\n", + " graph_seconds = pynutil.insert(\"integer: \\\"2\\\" morphosyntactic_features: \\\"\") + seconds\n", + "\n", + " # All together\n", + " graph_ordinal = pynini.union(graph_ordinal, graph_firsts, graph_seconds)\n", + "\n", + " # Special tokens\n", + " special_tokens = pynini.accep(\"siècle\")\n", + "\n", + " graph_special_tokens = delete_space + pynutil.insert(\"/\") + special_tokens # We need to delete the space between the ordinal and the key-word.\n", + " graph_special_tokens = pynini.closure(graph_special_tokens, 0, 1)\n", + "\n", + " graph_ordinal += graph_special_tokens + pynutil.insert(\"\\\"\")\n", + "\n", + " # Finishing\n", + " graph_ordinal = self.add_tokens(graph_ordinal)\n", + " self.fst = graph_ordinal.optimize()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7a4zBo-YS1QD" + }, + "source": [ + "## Verbalizer" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zYbrcGyGS2rW" + }, + "source": [ + "The initial part of the Ordinal Verbalizer is similar to that of the Cardinal WFST: we simply need to build a Verbalizer that inherits from `GraphFst` and removes the `integer` property tag. 
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "KUv99A_rYjb9" + }, + "outputs": [], + "source": [ + "class OrdinalFst(GraphFst):\n", + " def __init__(self):\n", + " super().__init__(name=\"ordinal\", kind=\"verbalize\")\n", + " graph_integer = (\n", + " pynutil.delete(\"integer:\")\n", + " + delete_space\n", + " + pynutil.delete(\"\\\"\")\n", + " + pynini.closure(NEMO_DIGIT, 1)\n", + " + pynutil.delete(\"\\\"\")\n", + " )" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zKCt_EapZXGW" + }, + "source": [ + "Now we need to manage the `morphosyntactic_features` component. The first steps seem simple enough: delete the property tag and replace the superscript indicators with the actual superscripts. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "yoa_mXMLabrU" + }, + "outputs": [], + "source": [ + " # Create mappings for all superscripts\n", + " superscript = pynini.union(\n", + " pynini.cross(\"e\", \"ᵉ\"), # only delete first quote since there may be more features\n", + " pynini.cross(\"d\", \"ᵈ\"),\n", + " pynini.cross(\"r\", \"ʳ\"),\n", + " pynini.cross(\"s\", \"ˢ\"),\n", + " )\n", + "\n", + " # Append to deletion of feature property. Note that we use plus closure for multiple superscripts.\n", + " graph_morphosyntactic_features = pynutil.delete(\" morphosyntactic_features: \\\"\") + superscript.plus" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xOA7_MsUrSJS" + }, + "source": [ + "### Romanization" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "K_SaG0DUa2t7" + }, + "source": [ + "Now we come to the possible Romanization component. Since we need to graph the superscript components as following the number, we want to design our graph so that `morphosyntactic_features` is the last component of the graph. However, we do not know that we need Romanization until we see the `morphosyntactic_features` component. As such, we need to design our graph such that two options are available initially for an input, but only one allows full traversal." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7dalc-tablG-" + }, + "source": [ + "![romanization.png](images/romanization.PNG)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "mPTNCddNcEEE" + }, + "source": [ + "In cases where your WFST decisions are dependent on latter parts of an input string, permitting the union of two separate paths when only one is valid usually assists, as a standard pathing heuristic will only choose the valid path. \n", + "\n", + "In the case of French, this would require us to separate our Verbalizer into two parts: one for Arabic numerals and one for Roman numerals. For the Arabic WFST, we simply conclude the graph. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "0YSy1PYOcuyD" + }, + "outputs": [], + "source": [ + "graph_integer = (\n", + " pynutil.delete(\"integer:\")\n", + " + delete_space\n", + " + pynutil.delete(\"\\\"\")\n", + " + pynini.closure(NEMO_DIGIT, 1)\n", + " + pynutil.delete(\"\\\"\")\n", + " )\n", + "graph_Arabic = graph_integer + graph_morphosyntactic_features + pynutil.delete(\"\\\"\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nnXjUU5Pf7Sh" + }, + "source": [ + "For the Roman graph, things get a bit trickier. Ideally, we would want to build a WFST that maps each digit of `graph_Arabic` to a Roman equivalent. 
However, consider the following examples:\n", + "- 1 -> I\n", + "- 10 -> X\n", + "- 11 -> XI\n", + "- 100 -> C\n", + "- 101 -> CI\n", + "- 110 -> CX\n", + "- 111 -> CXI\n", + "\n", + "Since Roman numerals do not preserve powers of ten through digit placement, we will need to design separate FSTs for each digit position and apply them accordingly. As this can quickly become intensive, we will only work to enumerate the Ordinals from 1 to 100. (Note: We are doing this to accommodate centuries; there is little likelihood that any century beyond the 99th will be used in regular strings.)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "3-fQHMc2iQrz" + }, + "source": [ + "First we design our graphs for converting from Arabic to Roman numerals:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "d6PDySykiXTh" + }, + "outputs": [], + "source": [ + "digits = pynini.string_map([(\"1\", \"I\"),\n", + " (\"2\",\t\"II\"),\n", + " (\"3\",\t\"III\"),\n", + " (\"4\",\t\"IV\"),\n", + " (\"5\",\t\"V\"),\n", + " (\"6\",\t\"VI\"),\n", + " (\"7\",\t\"VII\"),\n", + " (\"8\",\t\"VIII\"),\n", + " (\"9\",\t\"IX\"),\n", + " ])\n", + "tens = pynini.string_map([(\"1\", \"X\"),\n", + " (\"2\",\t\"XX\"),\n", + " (\"3\",\t\"XXX\"),\n", + " (\"4\",\t\"XL\"),\n", + " (\"5\",\t\"L\"),\n", + " (\"6\",\t\"LX\"),\n", + " (\"7\",\t\"LXX\"),\n", + " (\"8\",\t\"LXXX\"),\n", + " (\"9\",\t\"XC\"),\n", + " ])\n", + "zero = pynutil.delete(\"0\") # No Roman representation for zero." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "wb-LmwJdk59m" + }, + "source": [ + "Now we build two separate filters: one will accept only single digit Arabic numerals and the other will accept two digit Arabic numerals. For this we can use `NEMO_DIGIT`:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "DW3oD7Hbli2X" + }, + "outputs": [], + "source": [ + "map_one_digit = NEMO_DIGIT\n", + "map_two_digits = NEMO_DIGIT ** 2 # pynini overloads the exponent function to allow self-concatenation." 
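+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can spot-check the single-digit map on its own before wiring it in (again using the `apply_fst` helper):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "apply_fst(\"7\", digits) # -> VII\n", + "apply_fst(\"9\", digits) # -> IX"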
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xtYKLy9AmJZS" + }, + "source": [ + "We now build mappings between two-digit Arabic numerals and Roman numerals, composing them onto the filters:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "dUy7uEUXmT_g" + }, + "outputs": [], + "source": [ + "graph_one_digit_romans = NEMO_DIGIT @ digits\n", + "\n", + "graph_two_digit_romans = tens + (digits | zero)\n", + "graph_two_digit_romans = map_two_digits @ graph_two_digit_romans\n", + "\n", + "graph_romans = graph_one_digit_romans | graph_two_digit_romans" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "JEinyAMdm7RJ" + }, + "source": [ + "We now compose onto `graph_integer` and take care of the occurrence of \"siècle\":" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ERO19BbynPNX" + }, + "outputs": [], + "source": [ + "graph_romans = (graph_integer @ graph_romans) + graph_morphosyntactic_features\n", + "graph_romans += pynini.cross(\"/\", \" \") + \"siècle\" + pynutil.delete(\"\\\"\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zN-fwrCGoToQ" + }, + "source": [ + "We finalize with a union and a call to `delete_tokens`, the complete Verbalizer now being:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "kr2wcToAofWB" + }, + "outputs": [], + "source": [ + "class OrdinalFst(GraphFst):\n", + " def __init__(self):\n", + " super().__init__(name=\"ordinal\", kind=\"verbalize\")\n", + "\n", + " # Maps integer and removes attribute\n", + " graph_integer = (\n", + " pynutil.delete(\"integer:\")\n", + " + delete_space\n", + " + pynutil.delete(\"\\\"\")\n", + " + pynini.closure(NEMO_DIGIT, 1)\n", + " + pynutil.delete(\"\\\"\")\n", + " )\n", + "\n", + " # Create mappings for all superscripts\n", + " superscript = pynini.union(\n", + " pynini.cross(\"e\", \"ᵉ\"), # only delete first quote since there may be more features\n", + " pynini.cross(\"d\", \"ᵈ\"),\n", + " pynini.cross(\"r\", \"ʳ\"),\n", + " pynini.cross(\"s\", \"ˢ\"),\n", + " )\n", + "\n", + " # Append to deletion of feature property. 
Note that we use plus closure for multiple superscripts.\n", + " graph_morphosyntactic_features = pynutil.delete(\" morphosyntactic_features: \\\"\") + superscript.plus\n", + "\n", + " # Writing WFST for Arabic\n", + " graph_Arabic = graph_integer + graph_morphosyntactic_features + pynutil.delete(\"\\\"\")\n", + "\n", + " # Mapping Roman numerals\n", + " digits = pynini.string_map([(\"1\", \"I\"),\n", + " (\"2\",\t\"II\"),\n", + " (\"3\",\t\"III\"),\n", + " (\"4\",\t\"IV\"),\n", + " (\"5\",\t\"V\"),\n", + " (\"6\",\t\"VI\"),\n", + " (\"7\",\t\"VII\"),\n", + " (\"8\",\t\"VIII\"),\n", + " (\"9\",\t\"IX\"),\n", + " ])\n", + " tens = pynini.string_map([(\"1\", \"X\"),\n", + " (\"2\",\t\"XX\"),\n", + " (\"3\",\t\"XXX\"),\n", + " (\"4\",\t\"XL\"),\n", + " (\"5\",\t\"L\"),\n", + " (\"6\",\t\"LX\"),\n", + " (\"7\",\t\"LXX\"),\n", + " (\"8\",\t\"LXXX\"),\n", + " (\"9\",\t\"XC\"),\n", + " ])\n", + " zero = pynutil.delete(\"0\") # No Roman representation for zero.\n", + "\n", + " # filters for Roman digits\n", + " map_one_digit = NEMO_DIGIT\n", + " map_two_digits = NEMO_DIGIT ** 2 # pynini overloads the exponent function to allow self-concatenation.\n", + "\n", + " # Composing onto roman digits\n", + " graph_one_digit_romans = NEMO_DIGIT @ digits\n", + "\n", + " graph_two_digit_romans = tens + (digits | zero)\n", + " graph_two_digit_romans = map_two_digits @ graph_two_digit_romans\n", + "\n", + " graph_romans = graph_one_digit_romans | graph_two_digit_romans\n", + "\n", + " # Writing WFST for Roman\n", + " graph_romans = (graph_integer @ graph_romans) + graph_morphosyntactic_features\n", + " graph_romans += pynini.cross(\"/\", \" \") + \"siècle\" + pynutil.delete(\"\\\"\")\n", + "\n", + " # Final composition\n", + " graph = (graph_romans | graph_Arabic)\n", + "\n", + " delete_tokens = self.delete_tokens(graph)\n", + " self.fst = delete_tokens.optimize()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Trying out our examples:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "example_regular = 'ordinal { integer: \"12\" morphosyntactic_features: \"es\" }'\n", + "example_roman = 'ordinal { integer: \"12\" morphosyntactic_features: \"es/siècle\" }'\n", + "\n", + "fst = OrdinalFst().fst\n", + "\n", + "apply_fst(example_regular, fst)\n", + "apply_fst(example_roman, fst)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "yBgLhTq9pWZe" + }, + "source": [ + "We have now completed an Ordinal WFST from the ground up, allowing a separate numbering system for special cases." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-W1-BMVJUXXk" + }, + "source": [ + "## Final notes" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "kR7E64P4pPU_" + }, + "source": [ + "Before moving on, there are some key takeaways that you may find useful for most (if not all) languages:\n", + "- Many ordinal systems rely on alteration of Cardinals. Even in the example of Korean, it is using a pre-existing counting system and adding a suffix to indicate ordering. 
As such, your Ordinal WFST will likely follow this tutorial's structure of changing the Ordinal to its original root and then relying on your Cardinal WFST for the majority of processing.\n",
+    "- The `morphosyntactic_features` property will carry the vast majority of information necessary for normalization through your Verbalizer.\n",
+    "- While not all writing systems have the same quirk as using Roman numerals in reference to centuries, you will likely find cases in your language when a specific token indicates unique rules for a semiotic class. Carrying this information to the Verbalizer is usually the simplest means of preserving the token while also facilitating normalization. "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "Rx8-LuJOUaa5"
+   },
+   "source": [
+    "# Decimal WFST "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "D2MRXYxz8TGA"
+   },
+   "source": [
+    "\n",
+    "If the Cardinal WFST is the most crucial element of a normalization grammar, the construction of the Decimal WFST is a close second. Much like in the case of constructing Ordinals from Cardinal grammars, many aspects of the Decimal WFST will be reused throughout your other semiotic classes.\n",
+    "\n",
+    "To get started, you should study the numerical conventions in your language. In particular, you should take note of the following:\n",
+    "- How is the decimal component of a number pronounced in your language of focus? (e.g. The English number `1.33` can be verbalized as \"one point three three\" or \"one and thirty three hundredths.\")\n",
+    "- What is the punctuation mark used for decimal demarcation? (In North America, several writing systems use `.` while European nations will use `,`.)\n",
+    "- Are there general rules regarding pronunciation/formatting of numbers past the decimal demarcation? (e.g. Does your language pronounce each digit, or read them as a series of three digit numbers?)\n",
+    "\n",
+    "Such questions will likely require some deep familiarity with the language, and it may help to ask a native speaker for input. Of course, the level of depth is dependent on your needs, but researching these questions will help your normalization system appear more organic."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "UsK78ib4N-gb"
+   },
+   "source": [
+    "## Grammar"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "p4CLOOA9OAwZ"
+   },
+   "source": [
+    "In the case of French, we have the following guidelines:\n",
+    "- French uses the comma ( `,` ) for decimal delineation. It is articulated as \"virgule\".\n",
+    "- Decimals can be read as a series of digits or grouped as Cardinal numbers arbitrarily. (e.g. `.333` can be \"virgule trois trois trois\" or \"virgule trois-cent-trente-trois\".) \n",
+    "\n",
+    "As such, our grammar needs to accommodate the following pattern: \n",
+    "\n",
+    "`cardinal + \"virgule\" + string_of_cardinals`\n",
+    "\n",
+    "Given our experience with our previous WFSTs, this seems simple enough. We assume we have an instance of CardinalFst available and create a subcomponent to map the integer portion of a decimal:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "XSp9FTzhf0XZ"
+   },
+   "outputs": [],
+   "source": [
+    "cardinal = CardinalFst().graph_no_exception  # NeMo equivalent of just_cardinals\n",
+    "\n",
+    "# place cardinal under closure so values < 1 (no integer part) are permitted\n",
+    "graph_integer = pynini.closure(cardinal, 0, 1)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "bk3_3iawgAZE"
+   },
+   "source": [
+    "Concatenate it with a subcomponent that detects and removes the delineator \"virgule\":"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "UMzfAKkngH6z"
+   },
+   "outputs": [],
+   "source": [
+    "delete_virgule = pynutil.delete(\"virgule\")\n",
+    "graph_decimal = graph_integer + delete_space + delete_virgule"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "GXjbtbLYgn17"
+   },
+   "source": [
+    "And permit the occurrence of several strings of cardinals to follow:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "LMMNBJz8gtTA"
+   },
+   "outputs": [],
+   "source": [
+    "graph_string_of_cardinals = delete_space + cardinal\n",
+    "graph_string_of_cardinals = pynini.closure(graph_string_of_cardinals, 1)\n",
+    "\n",
+    "graph_decimal += graph_string_of_cardinals"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "jTgnRLddhGdE"
+   },
+   "source": [
+    "Let us try an example:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "D4rjDh0ShJAp"
+   },
+   "outputs": [],
+   "source": [
+    "example = \"trois virgule trois cinquante-cinq\" \n",
+    "apply_fst(example, graph_decimal)  # Should output only the cardinals in the string"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "RfD1d9JOioyl"
+   },
+   "source": [
+    "### Ambiguity?"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "3IaI1mCIe_6i"
+   },
+   "source": [
+    "Note that our decision to include multiple strings of cardinals after the decimal marker has introduced some ambiguity into our WFST. Consider if a decimal number was followed by an integer series (e.g. `2.5, 5, 6`). Now what should be an application of one DecimalFST and two applications of a CardinalFST can be interpreted as a single DecimalFST application (e.g. `2.556`). What can be done?\n",
+    "\n",
+    "While we will address this in greater depth later (see [Tokenize and Classify](#tokenize-and-classify)), the short answer is that cases such as these must be calibrated according to use and linguistic intuition. As this is an inherent ambiguity in the language and its writing system, we can never truly remove this possibility without restricting our ability to model the language. However, we can rely on a few logical assumptions to guide our decision making:\n",
+    "- Unless the grammar is deployed in a restrictive setting (e.g. a financial setting or another environment where strings of numbers are often read in series) it's not likely for a valid string to exhibit this level of ambiguity. Speakers typically try to reduce possible ambiguity in their language production and would likely rephrase to avoid issues such as these. [See Grice's maxims](https://en.wikipedia.org/wiki/Cooperative_principle).\n",
+    "- While a language may allow a specific string by *rule*, speakers may typically avoid them *in practice* due to conventions or difficulty. In our case, while it may be possible to read `2,100 05` as \"deux virgule dix-mille-cinq\" (\"two point ten-thousand and five\"), it's dubious that a speaker would find that easier to read than \"deux virgule une zéro zéro zéro cinq\". (The place value of large strings tends to take longer to recognize.)\n",
+    "\n",
+    "While hardly satisfying, these two points will allow us to dismiss *some* worry. With the former observation being outside our grammar's ability to manage, we accommodate the latter point by using an alternate WFST from our CardinalFST: `numbers_up_to_million`. (To utilize this in your own language, create a WFST in the Cardinal class right before building up to `graph_millions`. Again, calling `optimize` is advised.)\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "piNe1AWspa4J"
+   },
+   "outputs": [],
+   "source": [
+    "cardinal = CardinalFst().numbers_up_to_million\n",
+    "\n",
+    "# place cardinal under closure so values < 1 (no integer part) are permitted\n",
+    "graph_integer = pynini.closure(cardinal, 0, 1)\n",
+    "\n",
+    "delete_virgule = pynutil.delete(\"virgule\")\n",
+    "graph_decimal = graph_integer + delete_space + delete_virgule\n",
+    "\n",
+    "graph_string_of_cardinals = delete_space + cardinal\n",
+    "graph_string_of_cardinals = pynini.closure(graph_string_of_cardinals, 1)\n",
+    "\n",
+    "graph_decimal += graph_string_of_cardinals"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "B1gglt0tfM5V"
+   },
+   "source": [
+    "## Classifier"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "fVkOWkncgOZc"
+   },
+   "source": [
+    "Like with our previous WFSTs, the main duty for the classifier is inserting the necessary properties for the semiotic token. For the `decimal` tag, the following properties are used:\n",
+    "- `integer_part` - indicates the value before the decimal marker\n",
+    "- `fractional_part` - indicates the values after the decimal marker\n",
+    "- `negative` - indicates if the value is positive or negative (Optional)\n",
+    "- `quantity` - designates if the decimal is in regard to a specific quantity. (See Quantities.)\n",
+    "\n",
+    "We can begin by inserting the `integer_part` around our `cardinal` subcomponent and the `fractional_part` around our `graph_string_of_cardinals`:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "_zw_cDszh-fB"
+   },
+   "outputs": [],
+   "source": [
+    "graph_integer = pynutil.insert(\"integer_part: \\\"\") + cardinal + pynutil.insert(\"\\\" \")\n",
+    "graph_fractional = pynutil.insert(\"fractional_part: \\\"\") + graph_string_of_cardinals + pynutil.insert(\"\\\"\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "bxlnn_7tiQMn"
+   },
+   "source": [
+    "We then concatenate them with a component that recognizes and removes the decimal separator."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "BxNS9_AwiWHf"
+   },
+   "outputs": [],
+   "source": [
+    "graph_integer_or_none = graph_integer | pynutil.insert(\"integer_part: \\\"0\\\" \", weight=.1)  # In case there is no preceding integer\n",
+    "graph_decimal_no_sign = graph_integer_or_none + delete_space + pynutil.delete(\"virgule\") + graph_fractional"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "b7uGfsi4i5UI"
+   },
+   "source": [
+    "*Note that we allow insertion of 0 when there is no integer, to accommodate the reading of bare decimal values.*\n",
+    "\n",
+    "Now we allow the possibility of negative values. 
(Recall French uses \"moins\" to indicate the negative.)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "VsP79naojQZR" + }, + "outputs": [], + "source": [ + "graph_negative = pynini.cross(\"moins\", \"negative: \\\"-\\\" \") + delete_space\n", + "graph_decimal = graph_negative + graph_decimal_no_sign" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "QTcvq5HqllqW" + }, + "outputs": [], + "source": [ + "example = \"moins deux virgule cent-quatre\"\n", + "apply_fst(example, graph_decimal)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "FVKuGj_9mZ75" + }, + "source": [ + "Placing within a `DecimalFst` class, we have:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "tXwr32ermesp" + }, + "outputs": [], + "source": [ + "class DecimalFst(GraphFst):\n", + " def __init__(self, cardinal: GraphFst):\n", + " super().__init__(name=\"decimal\", kind=\"classify\")\n", + " cardinal = cardinal.numbers_up_to_million\n", + " delete_virgule = pynutil.delete(\"virgule\")\n", + "\n", + " graph_integer = pynutil.insert(\"integer_part: \\\"\") + cardinal + pynutil.insert(\"\\\" \") + delete_space\n", + " graph_integer_or_none = graph_integer | pynutil.insert(\"integer_part: \\\"0\\\" \", weight=.001) # In cases we don't always have an integer preceding\n", + "\n", + " graph_string_of_cardinals = delete_space + cardinal\n", + " graph_string_of_cardinals = pynini.closure(graph_string_of_cardinals, 1)\n", + " graph_fractional = pynutil.insert(\"fractional_part: \\\"\") + graph_string_of_cardinals + pynutil.insert(\"\\\"\")\n", + "\n", + " graph_decimal_no_sign = graph_integer_or_none + pynutil.delete(\"virgule\") + graph_fractional \n", + "\n", + " graph_negative = pynini.cross(\"moins\", \"negative: \\\"-\\\" \") + delete_space\n", + " graph_negative = pynini.closure(graph_negative, 0, 1)\n", + "\n", + " graph_decimal = graph_negative + graph_decimal_no_sign\n", + "\n", + " graph = self.add_tokens(graph_decimal)\n", + " self.fst = graph.optimize()\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "gjxI5mEKfHLo" + }, + "source": [ + "### Quantities" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "3WuwWPf3py7G" + }, + "source": [ + "Recalling our earlier remarks regarding convention in language use, you may find a need to adjust the DecimalFst when processing specific values. For instance, consider the following equivalencies from English:\n", + "- `1,500,000` = \"one million five hundred thousand\" = \"one point five million\" = `1.5 million`\n", + "- `2,750,000` = \"two million seven hundred and fifty thousand\" = \"two point seven five million\" = `2.75 million`\n", + "\n", + "For large numbers, there is a tendency to use the decimal system as though one is describing a quantity. Notably, there is a minimum value for which this is comfortable. (A speaker of English may say \"three point five trillion\" but \"three point five hundred\" comes off as odd.)\n", + "\n", + "This behavior can occur in other languages. For example, the amount of `$1,500,000` may be read in French as \"une virgule cinq million de dollars\" (\"one point five million dollars\"). " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "RgMBIKlYdsGz" + }, + "source": [ + "Our Classifier can be made to accommodate this behavior: we simply need to repeat what we did for `OrdinalFst` and set aside several key terms to trigger our model. 
For French, we will choose all terms added for values greater than a million. (Chosen empirically.)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "vEcsUXw5fUEe"
+   },
+   "outputs": [],
+   "source": [
+    "suffix = pynini.union(\n",
+    "    \"million\",\n",
+    "    \"millions\",\n",
+    "    \"milliard\",\n",
+    "    \"milliards\",\n",
+    "    \"billion\",\n",
+    "    \"billions\",\n",
+    "    \"billiard\",\n",
+    "    \"billiards\",\n",
+    "    \"trillion\",\n",
+    "    \"trillions\",\n",
+    "    \"trilliard\",\n",
+    "    \"trilliards\",\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "wIIUAsR-fgQA"
+   },
+   "source": [
+    "We will then need to use a WFST to graph any numbers that precede these amounts. Note, unlike for our `DecimalFst`, we need to permit cardinals as well as decimals. This is because we want to be able to normalize a phrase like \"three million\" to `3 million` as this will be less obtrusive than `3,000,000`.\n",
+    "\n",
+    "As such, we will call on a `CardinalFst` and a `DecimalFst` for `graph_quantities`. Since these are both utilized for our `DecimalFst`, it would be more efficient to just pass them along as function/class variables."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "yern-idtycWg"
+   },
+   "outputs": [],
+   "source": [
+    "def get_quantity(decimal, cardinal_up_to_thousand):\n",
+    "    suffix = pynini.union(\n",
+    "        \"million\",\n",
+    "        \"millions\",\n",
+    "        \"milliard\",\n",
+    "        \"milliards\",\n",
+    "        \"billion\",\n",
+    "        \"billions\",\n",
+    "        \"billiard\",\n",
+    "        \"billiards\",\n",
+    "        \"trillion\",\n",
+    "        \"trillions\",\n",
+    "        \"trilliard\",\n",
+    "        \"trilliards\",\n",
+    "    )\n",
+    "    # The French WFST that this borrows from has not removed leading zeroes yet.\n",
+    "    numbers = cardinal_up_to_thousand @ (\n",
+    "        pynutil.delete(pynini.closure(\"0\")) + pynini.difference(NEMO_DIGIT, \"0\") + pynini.closure(NEMO_DIGIT)\n",
+    "    )\n",
+    "    res = (\n",
+    "        pynutil.insert(\"integer_part: \\\"\")\n",
+    "        + numbers\n",
+    "        + pynutil.insert(\"\\\"\")\n",
+    "        + (\n",
+    "            pynini.union(delete_hyphen, delete_extra_space)\n",
+    "        )  # Can be written either as 'deux-millions' or 'deux millions' depending on whether it registers as a noun or part of cardinal.\n",
+    "        + pynutil.insert(\" quantity: \\\"\")\n",
+    "        + suffix\n",
+    "        + pynutil.insert(\"\\\"\")\n",
+    "    )\n",
+    "    # Union with decimal to permit either a cardinal or decimal representation.\n",
+    "    res |= decimal + delete_extra_space + pynutil.insert(\" quantity: \\\"\") + suffix + pynutil.insert(\"\\\"\")\n",
+    "    return res"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "uT4LMo8ADBAq"
+   },
+   "source": [
+    "We can now insert this into our Classifier, producing the following:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "d2KrCuyGDLwh"
+   },
+   "outputs": [],
+   "source": [
+    "class DecimalFst(GraphFst):\n",
+    "    def __init__(self, cardinal: GraphFst):\n",
+    "        super().__init__(name=\"decimal\", kind=\"classify\")\n",
+    "        quantities_cardinal = cardinal.graph_hundreds_component_at_least_one_none_zero_digit\n",
+    "        cardinal = cardinal.graph_no_exception\n",
+    "        delete_virgule = pynutil.delete(\"virgule\")\n",
+    "\n",
+    "        graph_integer = pynutil.insert(\"integer_part: \\\"\") + cardinal + pynutil.insert(\"\\\" \") + delete_space\n",
+    "        graph_integer_or_none = graph_integer | pynutil.insert(\"integer_part: \\\"0\\\" \", weight=.001)  # In case there is no preceding integer\n",
+    "\n",
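+    "        # Fractional part: one or more cardinals read out after 'virgule'\n",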
+    "        graph_string_of_cardinals = delete_space + cardinal\n",
+    "        graph_string_of_cardinals = pynini.closure(graph_string_of_cardinals, 1)\n",
+    "        graph_fractional = pynutil.insert(\"fractional_part: \\\"\") + graph_string_of_cardinals + pynutil.insert(\"\\\"\")\n",
+    "\n",
+    "        graph_decimal_no_sign = graph_integer_or_none + delete_virgule + graph_fractional \n",
+    "\n",
+    "        graph_negative = pynini.cross(\"moins\", \"negative: \\\"-\\\" \") + delete_space\n",
+    "        graph_negative = pynini.closure(graph_negative, 0, 1)\n",
+    "        graph_decimal = graph_negative + graph_decimal_no_sign\n",
+    "\n",
+    "        # Union default decimal with version that accepts quantities\n",
+    "        graph_decimal |= graph_negative + get_quantity(\n",
+    "            graph_decimal_no_sign, quantities_cardinal\n",
+    "        )\n",
+    "        final_graph = self.add_tokens(graph_decimal)\n",
+    "        self.fst = final_graph.optimize()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "cD-eKqO6qTyh"
+   },
+   "outputs": [],
+   "source": [
+    "cardinal = CardinalFst()\n",
+    "decimal = DecimalFst(cardinal).fst\n",
+    "example = \"trois virgule cent-quatre billion\"\n",
+    "apply_fst(example, decimal)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "HiSLKF3RfRZA"
+   },
+   "source": [
+    "## Verbalizer"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "QnkOV5FlteQA"
+   },
+   "source": [
+    "As before, the Verbalizer is responsible for removing the formatting and rendering a given token in conventional form. As the process remains similar to Ordinals and Cardinals (deleting strings in a regular manner) we will instead focus on a unique concern for `DecimalFst`: numeral spacing.\n",
+    "\n",
+    "For some writing systems, decimal numbers and other strings are typically not written as a single string, instead using punctuation to group numbers for clarity. For example, in the United States, integer digits greater than a thousand are separated by commas for every three digits:\n",
+    "- `12345.678` -> `12,345.678`\n",
+    "\n",
+    "A similar rule occurs in French, save it employs spaces on each side of the decimal marker:\n",
+    "- `12345,6789` -> `12 345,678 9`"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "2h4WQZ1a4Cpc"
+   },
+   "source": [
+    "While simple enough, this rule poses a slight complication: it works from the left and right of the decimal separator, whereas WFSTs process linearly from the beginning (or end) of strings. As such we will need to break the formatting rule into two components: one for the integer component and one for the decimal component."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "ViOFNdZw4-qu"
+   },
+   "source": [
+    "Starting with the integer component, we need our subcomponent to recognize every three digits and insert a space before them. We can achieve this with some `graph_utils` helper objects - `NEMO_DIGIT` and `NEMO_NON_BREAKING_SPACE`, which accept all digits and non-breaking spaces, respectively. "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "Z36be2Vo5VbR"
+   },
+   "outputs": [],
+   "source": [
+    "every_three_digits = NEMO_DIGIT ** 3  # accepts a string of three digits\n",
+    "space_every_three_integer = pynini.closure(pynutil.insert(NEMO_NON_BREAKING_SPACE) + every_three_digits)  # inserts space before every three digits."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "RSB2gGH-5vwi"
+   },
+   "source": [
+    "However, we cannot let the component insert spaces when there are *only* three digits (e.g. `100`.) 
As such, we need to make sure the insertion only begins after an initial group of one to three digits at the start of the string."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "wfWp3ghH6mDQ"
+   },
+   "outputs": [],
+   "source": [
+    "space_every_three_integer = pynini.closure(NEMO_DIGIT, 1, 3) + space_every_three_integer"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "NJrQYSfA6vyu"
+   },
+   "source": [
+    "For the case of the decimal spacing, we simply reverse the logic:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "vBP6ncTp6yXX"
+   },
+   "outputs": [],
+   "source": [
+    "space_every_three_decimal = pynini.closure(every_three_digits + pynutil.insert(NEMO_NON_BREAKING_SPACE))\n",
+    "space_every_three_decimal = space_every_three_decimal + pynini.closure(NEMO_DIGIT, 1, 3)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "WRXPN_gk69VV"
+   },
+   "source": [
+    "Placed into our Verbalizer, we would see the following:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "h49eztvs7BXH"
+   },
+   "outputs": [],
+   "source": [
+    "class DecimalFst(GraphFst):\n",
+    "    \"\"\"\n",
+    "    Finite state transducer for verbalizing decimal, e.g.\n",
+    "    decimal { negative: \"-\" integer_part: \"12\" fractional_part: \"5006\" quantity: \"billion\" } -> -12,500 6 billion\n",
+    "    \"\"\"\n",
+    "\n",
+    "    def __init__(self):\n",
+    "        super().__init__(name=\"decimal\", kind=\"verbalize\")\n",
+    "\n",
+    "        # Need parser to group digits by threes\n",
+    "        exactly_three_digits = NEMO_DIGIT ** 3\n",
+    "        at_most_three_digits = pynini.closure(NEMO_DIGIT, 1, 3)\n",
+    "\n",
+    "        space_every_three_integer = (\n",
+    "            at_most_three_digits + (pynutil.insert(NEMO_NON_BREAKING_SPACE) + exactly_three_digits).closure()\n",
+    "        )\n",
+    "        space_every_three_decimal = (\n",
+    "            pynini.accep(\",\")\n",
+    "            + (exactly_three_digits + pynutil.insert(NEMO_NON_BREAKING_SPACE)).closure()\n",
+    "            + at_most_three_digits\n",
+    "        )\n",
+    "        group_by_threes = space_every_three_integer | space_every_three_decimal\n",
+    "        self.group_by_threes = group_by_threes\n",
+    "\n",
+    "        optional_sign = pynini.closure(pynini.cross(\"negative: \\\"-\\\"\", \"-\") + delete_space, 0, 1)\n",
+    "        integer = (\n",
+    "            pynutil.delete(\"integer_part:\")\n",
+    "            + delete_space\n",
+    "            + pynutil.delete(\"\\\"\")\n",
+    "            + pynini.closure(NEMO_NOT_QUOTE, 1)\n",
+    "            + pynutil.delete(\"\\\"\")\n",
+    "        )\n",
+    "        integer = integer @ group_by_threes\n",
+    "        optional_integer = pynini.closure(integer + delete_space, 0, 1)\n",
+    "        fractional = (\n",
+    "            pynutil.insert(\",\")\n",
+    "            + pynutil.delete(\"fractional_part:\")\n",
+    "            + delete_space\n",
+    "            + pynutil.delete(\"\\\"\")\n",
+    "            + pynini.closure(NEMO_NOT_QUOTE, 1)\n",
+    "            + pynutil.delete(\"\\\"\")\n",
+    "        )\n",
+    "        fractional = fractional @ group_by_threes\n",
+    "        optional_fractional = pynini.closure(fractional + delete_space, 0, 1)\n",
+    "        quantity = (\n",
+    "            pynutil.delete(\"quantity:\")\n",
+    "            + delete_space\n",
+    "            + pynutil.delete(\"\\\"\")\n",
+    "            + pynini.closure(NEMO_NOT_QUOTE, 1)\n",
+    "            + pynutil.delete(\"\\\"\")\n",
+    "        )\n",
+    "        optional_quantity = pynini.closure(pynutil.insert(\" \") + quantity + delete_space, 0, 1)\n",
+    "        graph = (optional_integer + optional_fractional + optional_quantity).optimize()\n",
+    "        self.numbers = graph  # Saving just the part of the graph used for numbers\n",
+    "        graph = optional_sign + graph\n",
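+    "        # delete_tokens strips the surrounding 'decimal { ... }' token wrapper\n",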
+    "        delete_tokens = self.delete_tokens(graph)\n",
+    "        self.fst = delete_tokens.optimize()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Trying out some examples:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "fst = DecimalFst().fst\n",
+    "\n",
+    "example1 = 'decimal { integer_part: \"3\" fractional_part: \"10453\" quantity: \"billion\" }'\n",
+    "example2 = 'decimal { integer_part: \"22323\" fractional_part: \"104553\" }'\n",
+    "\n",
+    "apply_fst(example1, fst)\n",
+    "apply_fst(example2, fst)\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "CZbshZCW8clI"
+   },
+   "source": [
+    "# Money WFST "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "xuiv8HMz7yjm"
+   },
+   "source": [
+    "Now that we've handled some of the foundational classes, it's time to see how they build up to permit more concrete ones. Let's see how the previous WFSTs assist in building a WFST for normalizing currency: the `MoneyFst`. "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "wTU2c7MtUpqF"
+   },
+   "source": [
+    "## Grammar"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "qqyRm8Ru8TDf"
+   },
+   "source": [
+    "While the exact phrasing will vary, a valid string for currency will possess the following qualities:\n",
+    "- A major and/or minor denomination of currency\n",
+    "- A numeric quantity of the denomination \n",
+    "\n",
+    "As our `CardinalFst` and `OrdinalFst` already allow us to normalize the quantity, the only issue for `MoneyFst` is to graph the amounts and build a vocabulary to recognize the denominations.\n",
+    "\n",
+    "For French, we will use the following examples to build upon:\n",
+    "- \"une euro\" -> `1 €`\n",
+    "- \"deux euros\" -> `2 €` \n",
+    "- \"deux euros cinq\" -> `2,05 €` \n",
+    "- \"cinq centimes\" -> `0,05 €`\n",
+    "- \"deux billions de euros\" -> `2 billions de euros`"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "FMqUir9n9_cA"
+   },
+   "source": [
+    "These suggest the following requirements of our grammar:\n",
+    "- There must be a mapping between \"euro\" and \"centime\" and `€` in our vocabulary\n",
+    "- This mapping must allow both singular and plural forms\n",
+    "- The currency denomination is phrased between major and minor denominations (\"une euro cinq\" and not \"une cinq euro\")\n",
+    "- Large quantities of currency are left 'as is' instead of normalized\n",
+    "\n",
+    "We may deal with the vocabulary in the typical fashion:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "XN9nbNhB-vEV"
+   },
+   "outputs": [],
+   "source": [
+    "major_currency = pynini.string_map([(\"euro\", \"€\")])\n",
+    "minor_currency = pynini.string_map([(\"centime\", \"€\")])\n",
+    "\n",
+    "graph_plural = pynutil.delete(\"s\").ques\n",
+    "\n",
+    "major_currency += graph_plural\n",
+    "minor_currency += graph_plural"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "3aHrm1qPAc-f"
+   },
+   "source": [
+    "Moving to the numbers, note that we need to append a leading zero to the value of fractional currency amounts (\"five cents\" -> `$0.05`). 
We bring back the subgraph from `CardinalFst` that maps tokens to numbers without tokenization to assist with this:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "jwi-yQW1AjvG"
+   },
+   "outputs": [],
+   "source": [
+    "from nemo_text_processing.inverse_text_normalization.fr.taggers import cardinal\n",
+    "\n",
+    "cardinal_graph = cardinal.CardinalFst()\n",
+    "graph_cardinal = cardinal_graph.graph_no_exception  # graphs cardinals w/o tokenization\n",
+    "\n",
+    "add_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (pynutil.insert(\"0\") + NEMO_DIGIT)\n",
+    "graph_fractional_values = graph_cardinal @ add_leading_zero_to_double_digit"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now, let us consider how to manage large quantities of currency. In our example (\"deux billions de euros\" -> `2 billions de euros`) we see that its behavior mirrors that of our `get_quantity` portion of `DecimalFst`. As such, it would be useful if there were a subcomponent of that graph that we could use here. Like in the case of `CardinalFst`, let us go back and create a subgraph for later use. Since all our quantities are positive, this would be best accomplished right before incorporating the `negative` property, creating a `self.final_graph_wo_negative`:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "class DecimalFst(GraphFst):\n",
+    "    def __init__(self, cardinal: GraphFst):\n",
+    "        super().__init__(name=\"decimal\", kind=\"classify\")\n",
+    "        quantities_cardinal = cardinal.graph_hundreds_component_at_least_one_none_zero_digit\n",
+    "        cardinal = cardinal.graph_no_exception\n",
+    "        delete_virgule = pynutil.delete(\"virgule\")\n",
+    "\n",
+    "        graph_integer = pynutil.insert(\"integer_part: \\\"\") + cardinal + pynutil.insert(\"\\\" \") + delete_space\n",
+    "        graph_integer_or_none = graph_integer | pynutil.insert(\"integer_part: \\\"0\\\" \", weight=.001)  # In case there is no preceding integer\n",
+    "\n",
+    "        graph_string_of_cardinals = delete_space + cardinal\n",
+    "        graph_string_of_cardinals = pynini.closure(graph_string_of_cardinals, 1)\n",
+    "        graph_fractional = pynutil.insert(\"fractional_part: \\\"\") + graph_string_of_cardinals + pynutil.insert(\"\\\"\")\n",
+    "\n",
+    "        graph_decimal_no_sign = graph_integer_or_none + delete_virgule + graph_fractional \n",
+    "\n",
+    "        ### NEW GRAPH HERE\n",
+    "        self.final_graph_wo_negative = graph_decimal_no_sign | get_quantity(\n",
+    "            graph_decimal_no_sign, quantities_cardinal\n",
+    "        )\n",
+    "\n",
+    "        graph_negative = pynini.cross(\"moins\", \"negative: \\\"-\\\" \") + delete_space\n",
+    "        graph_negative = pynini.closure(graph_negative, 0, 1)\n",
+    "        graph_decimal = graph_negative + graph_decimal_no_sign\n",
+    "\n",
+    "        # Union default decimal with version that accepts quantities\n",
+    "        graph_decimal |= graph_negative + get_quantity(\n",
+    "            graph_decimal_no_sign, quantities_cardinal\n",
+    "        )\n",
+    "        final_graph = self.add_tokens(graph_decimal)\n",
+    "        self.fst = final_graph.optimize()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Allowing us to change our grammar to:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from nemo_text_processing.inverse_text_normalization.fr.taggers import cardinal, decimal\n",
+    "\n",
+    "cardinal_graph = cardinal.CardinalFst()\n",
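+    "# pass the cardinal tagger in so the decimal grammar can reuse its subgraphs\n",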
+    "decimal_graph = decimal.DecimalFst(cardinal_graph)\n",
+    "\n",
+    "graph_cardinal = cardinal_graph.graph_no_exception  # graphs cardinals w/o tokenization\n",
+    "graph_decimal = decimal_graph.final_graph_wo_negative  # graphs positive decimals w/o tokenization\n",
+    "\n",
+    "add_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (pynutil.insert(\"0\") + NEMO_DIGIT)\n",
+    "graph_fractional_values = graph_cardinal @ add_leading_zero_to_double_digit"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "L1RHoW-TLzIz"
+   },
+   "source": [
+    "Note that by doing this, we're also incorporating the formatting from the `decimal` class up to this point. Since these overlap with the `money` class (see next section), we have saved ourselves some work. \n",
+    "\n",
+    "Since we already made `graph_quantity` part of our `DecimalFst`, we can avoid dealing with large quantities now. However, this does mean we still need a way to leave currencies 'as is' without normalization. We can do this by using the `project` method, which will create a WFST that accepts either all valid inputs or all valid outputs of another WFST (depending on argument)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "7l_TLtJkMluU"
+   },
+   "outputs": [],
+   "source": [
+    "major_currency_no_normalize = major_currency.project(\"input\")\n",
+    "apply_fst(\"euro\", major_currency_no_normalize)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "raBdHc_WXEpG"
+   },
+   "source": [
+    "We then prepend a WFST that recognizes prepositions commonly used before large values of currency (\"d'\", \"des\"):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "CEuxiVgDXRBf"
+   },
+   "outputs": [],
+   "source": [
+    "graph_preposition = pynini.union(\"des \", \"d'\")  # Used for large amounts (billions de euros)\n",
+    "major_currency_no_normalize = pynini.closure(graph_preposition, 0, 1) + major_currency.project(\"input\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "FlXmf8Fq_Rm1"
+   },
+   "source": [
+    "## Classifier"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "T5BBuQRzLuXS"
+   },
+   "source": [
+    "For the Money semiotic class, we have available the following properties for tokenization:\n",
+    "- `integer_part`\n",
+    "- `fractional_part` \n",
+    "- `currency`\n",
+    "\n",
+    "Laying the initial groundwork seems simple enough. 
We first instantiate our `MoneyFst` classifier with our initial grammars:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "EZaCeHcFWVP3"
+   },
+   "outputs": [],
+   "source": [
+    "class MoneyFst(GraphFst):\n",
+    "    def __init__(self, cardinal: GraphFst, decimal: GraphFst):\n",
+    "        super().__init__(name=\"money\", kind=\"classify\")\n",
+    "        major_currency = pynini.string_map([(\"euro\", \"€\")])\n",
+    "        minor_currency = pynini.string_map([(\"centime\", \"€\")])\n",
+    "\n",
+    "        graph_plural = pynutil.delete(\"s\").ques\n",
+    "\n",
+    "        major_currency += graph_plural\n",
+    "        minor_currency += graph_plural\n",
+    "\n",
+    "        graph_preposition = pynini.union(\"des \", \"d'\")  # Used for large amounts (billions de euros)\n",
+    "        major_currency_no_normalize = pynini.closure(graph_preposition, 0, 1) + major_currency.project(\"input\")\n",
+    "\n",
+    "        graph_cardinal = cardinal.graph_no_exception\n",
+    "        graph_decimal = decimal.final_graph_wo_negative\n",
+    "\n",
+    "        add_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (pynutil.insert(\"0\") + NEMO_DIGIT)\n",
+    "        graph_fractional_values = graph_cardinal @ add_leading_zero_to_double_digit"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "_bpkXroLWaBo"
+   },
+   "source": [
+    "Let us now manage the `currency` property. We have the following scenarios to consider:\n",
+    "- Major denomination only\n",
+    "- Minor denomination only\n",
+    "- Major denomination and implicit minor denomination (\"cinq euro trois\")\n",
+    "- Major denomination and explicit minor denomination (\"cinq euros et trois centimes\")\n",
+    "- Large quantities of euros (\"cinq billion des euros\")\n",
+    "\n",
+    "Note how `graph_cardinal` and `graph_decimal` will be applied differently across these cases. Further, we may have varying orders in which tags are assigned proper values. For instance, if we have only a minor denomination we would assign `fractional_part` before `currency`. Meanwhile, a major denomination with an implicit minor denomination would use the order `integer_part`, `currency`, `fractional_part`. While we could try to figure out a way to preserve order, recall that the use of permutations in NeMo ITN makes that unnecessary: we can assume the desired order of tags reaches our Verbalizer without making overt efforts in our Classifier! \n",
+    "\n",
+    "For example, let's say we need to process \"five dollars\" as `$5.00`. Processed linearly, we could get a token sequence along the lines of: `{ integer_part: \"5\" currency: \"$\" }`. If we passed this token array straight to a Verbalizer, we would need to configure a graph that effectively reverses the order so we could parse the `currency` field prior to the `integer_part` field, perhaps something along the lines of: \n",
+    "\n",
+    "`pynutil.insert(\"$\") + delete_space + pynutil.delete('integer_part: \\\"') +.... + pynutil.delete('currency: \"$\"')`\n",
+    "\n",
+    "But since NeMo creates permutations of our Classifier outputs, this is unnecessary. We can simply assume whatever would be the most convenient order for us (e.g. 
`{ currency: \"$\" integer_part: \"5\" }`) and build our Verbalizer around that:\n", + "\n", + "`pynutil.delete('currency: \\\"') + NEMO_SIGMA + pynutil.delete('\\\" integer_part: \\\"') + NEMO_DIGIT +...`\n", + "\n", + "Along with helping to keep our script simpler (we can focus simply on tokenization and not worry about what input order our Verbalizers will accept), this also allows us to overcome structural constraints of WFSTs, namely that they are [limited in reordering text strings](https://en.wikipedia.org/wiki/Pushdown_automaton)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "fMZ13D2Dh9ZF" + }, + "source": [ + "Keeping this in mind, let's begin mapping the proper tags. Since they're relatively simple, we can start with only major and minor denominations:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "EtwWLp7VbbjM" + }, + "outputs": [], + "source": [ + "graph_integer_component = pynutil.insert(\"integer_part: \\\"\") + graph_cardinal + pynutil.insert(\"\\\"\")\n", + "graph_fractional_component = pynutil.insert(\"fractional_part: \\\"\") + graph_fractional_values + pynutil.insert(\"\\\"\")\n", + "\n", + "graph_major_currency = pynutil.insert(\" currency: \\\"\") + major_currency + pynutil.insert(\"\\\"\")\n", + "graph_minor_currency = pynutil.insert(\" currency: \\\"\") + minor_currency + pynutil.insert(\"\\\"\")\n", + "\n", + "graph_only_major_money = graph_integer_component + delete_space + graph_major_currency\n", + "graph_only_minor_money = graph_fractional_component + delete_space + graph_minor_currency " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "XTmxrK4DmS39" + }, + "source": [ + "Now we may append the case of an implicit `fractional_part` to `graph_only_major_money`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Zvzn3pQinkT0" + }, + "outputs": [], + "source": [ + "implicit_fractional_part = delete_space + pynutil.insert(\"fractional_part: \\\"\") + graph_fractional_values + pynutil.insert(\"\\\"\") \n", + "implicit_fractional_part = pynini.closure(implicit_fractional_part, 0, 1) " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tKFZkCVmn1OX" + }, + "source": [ + "And the explicit fractional portion:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "d_h0pTlMn3jz" + }, + "outputs": [], + "source": [ + "delete_et = pynutil.delete(\"et \") # Sometimes prefaces the minor currency\n", + "delete_et = pynini.closure(delete_et, 0 , 1)\n", + "\n", + "delete_minor = pynutil.delete(minor_currency.project(\"input\")) # to remove the minor currency\n", + "\n", + "explicit_fractional_part = pynutil.insert(\"fractional_part: \\\"\") + graph_fractional_values + pynutil.insert(\"\\\"\") \n", + "explicit_fractional_part = delete_space + delete_et + explicit_fractional_part + delete_space + delete_minor\n", + "explicit_fractional_part = pynini.closure(explicit_fractional_part, 0, 1)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "rvnpAudgo-o3" + }, + "source": [ + "We join them together:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "qYzlIRWTpD8e" + }, + "outputs": [], + "source": [ + "graph_major_money = graph_only_major_money + (implicit_fractional_part | explicit_fractional_part)\n", + "graph_standard_money = graph_major_money | graph_only_minor_money" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "TzeaKXVzpYs8" + }, + "source": [ + 
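+    "Before moving to large quantities, a quick hypothetical sanity check of the grammar so far (assuming the cells above have been run in order):\n",
+    "\n",
+    "```python\n",
+    "apply_fst(\"deux euros cinq\", graph_standard_money)\n",
+    "apply_fst(\"cinq centimes\", graph_standard_money)\n",
+    "```\n",
+    "\n",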
"Finishing with the case the large quantities of money, we need to use `graph_decimal` so we can exploit its ability to map quantities. Note that since we are using a pre-existing WFST, we can ignore inserting the tags ourselves, since this is already done by the Decimal WFST. As long as we remember to process this aspect with our Verbalizer, we can spare ourselves the extra step." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "LnqX9mGFpmJm" + }, + "outputs": [], + "source": [ + "graph_large_money = pynutil.insert(\" currency: \\\"\") + major_currency_no_normalize + pynutil.insert(\"\\\"\")\n", + "graph_large_money = graph_decimal + delete_space + graph_large_money" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "24TUZnJKqgPA" + }, + "source": [ + "Alltogether, this would give the following Classifier:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "B7-muCO2qizg" + }, + "outputs": [], + "source": [ + "class MoneyFst(GraphFst):\n", + " def __init__(self, cardinal: GraphFst, decimal: GraphFst):\n", + " super().__init__(name=\"money\", kind=\"classify\")\n", + " major_currency = pynini.string_map([(\"euro\", \"€\")])\n", + " minor_currency = pynini.string_map([(\"centime\", \"€\")])\n", + "\n", + " graph_plural = pynutil.delete(\"s\").ques\n", + "\n", + " major_currency += graph_plural\n", + " minor_currency += graph_plural\n", + "\n", + " major_currency_no_normalize = major_currency.project(\"input\")\n", + " graph_preposition = pynini.union(\"des \", \"d'\") # Used for large amounts (billions de euros)\n", + " major_currency_no_normalize = graph_preposition + major_currency.project(\"input\")\n", + "\n", + " graph_cardinal = cardinal.graph_no_exception\n", + " graph_decimal = decimal.final_graph_wo_negative\n", + "\n", + " add_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (pynutil.insert(\"0\") + NEMO_DIGIT)\n", + " graph_fractional_values = graph_cardinal @ add_leading_zero_to_double_digit\n", + "\n", + " graph_integer_component = pynutil.insert(\"integer_part: \\\"\") + graph_cardinal + pynutil.insert(\"\\\"\")\n", + " graph_fractional_component = pynutil.insert(\"fractional_part: \\\"\") + graph_fractional_values + pynutil.insert(\"\\\"\")\n", + "\n", + " graph_major_currency = pynutil.insert(\" currency: \\\"\") + major_currency + pynutil.insert(\"\\\"\")\n", + " graph_minor_currency = pynutil.insert(\" currency: \\\"\") + minor_currency + pynutil.insert(\"\\\"\")\n", + "\n", + " graph_only_major_money = graph_integer_component + delete_space + graph_major_currency\n", + " graph_only_minor_money = graph_fractional_component + delete_space + graph_minor_currency \n", + "\n", + " implicit_fractional_part = delete_space + pynutil.insert(\"fractional_part: \\\"\") + graph_fractional_values + pynutil.insert(\"\\\"\") \n", + " implicit_fractional_part = pynini.closure(implicit_fractional_part, 0, 1) \n", + "\n", + "\n", + " delete_et = pynutil.delete(\"et \") # Sometimes prefaces the minor currency\n", + " delete_et = pynini.closure(delete_et, 0 , 1)\n", + "\n", + " delete_minor = pynutil.delete(minor_currency.project(\"input\")) # to remove the minor currency\n", + "\n", + " explicit_fractional_part = pynutil.insert(\"fractional_part: \\\"\") + graph_fractional_values + pynutil.insert(\"\\\"\") \n", + " explicit_fractional_part = delete_space + delete_et + explicit_fractional_part + delete_space + delete_minor\n", + " explicit_fractional_part = 
+    "        explicit_fractional_part = pynini.closure(explicit_fractional_part, 0, 1)\n",
+    "\n",
+    "        graph_major_money = graph_only_major_money + (implicit_fractional_part | explicit_fractional_part)\n",
+    "\n",
+    "        graph_large_money = pynutil.insert(\" currency: \\\"\") + major_currency_no_normalize + pynutil.insert(\"\\\"\")\n",
+    "        graph_large_money = graph_decimal + delete_space + graph_large_money\n",
+    "\n",
+    "        final_graph = graph_large_money | graph_major_money | graph_only_minor_money\n",
+    "\n",
+    "        final_graph = self.add_tokens(final_graph)\n",
+    "        self.fst = final_graph.optimize()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Let's see the results:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from nemo_text_processing.inverse_text_normalization.fr.taggers import decimal, cardinal\n",
+    "\n",
+    "cardFst = cardinal.CardinalFst()\n",
+    "decFst = decimal.DecimalFst(cardFst)\n",
+    "\n",
+    "moneyFst = MoneyFst(cardFst, decFst).fst\n",
+    "\n",
+    "example = \"douze virgule cinq billions d'euros\"\n",
+    "\n",
+    "apply_fst(example, moneyFst)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "gxdcyuLmAZZa"
+   },
+   "source": [
+    "## Verbalizer"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "ZZFDWNwY6sOG"
+   },
+   "source": [
+    "By this point, the creation of the Verbalizer should be rather straight-forward - delete the expected tokens and perform any specific formatting that was not caught by the Classifier. \n",
+    "\n",
+    "In fact, it is so straight-forward that much of the work does not even need to be explicitly managed by the Verbalizer. As mentioned previously, two of the properties we inserted in our Classifier were already referenced in our `DecimalFst` - `integer_part` and `fractional_part`. We even went so far as to directly call a component of `DecimalFst` in our Classifier. As such, outside of the `currency` property - there is little in our Money token that is different from a standard Decimal token. Indeed, even the normalized forms are similar (`200,5` vs. `200,5 €`.) "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "T7sgH0t79tmU"
+   },
+   "source": [
+    "Given these similarities, it seems that we can save ourselves some work and simply use the Decimal Verbalizer to manage much of the normalization. 
Let's look at the basic format of our `MoneyFst` verbalizer, writing it so it accepts a `DecimalFst` as input:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "BEu8nITP9mSG"
+   },
+   "outputs": [],
+   "source": [
+    "class MoneyFst(GraphFst):\n",
+    "    def __init__(self, decimal: GraphFst):\n",
+    "        super().__init__(name=\"money\", kind=\"verbalize\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "JYVLou5N-Dk8"
+   },
+   "source": [
+    "We manage the issue of deleting the `currency` property:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "LO35tJ7G-H6N"
+   },
+   "outputs": [],
+   "source": [
+    "class MoneyFst(GraphFst):\n",
+    "    def __init__(self, decimal: GraphFst):\n",
+    "        super().__init__(name=\"money\", kind=\"verbalize\")\n",
+    "        unit = (\n",
+    "            pynutil.delete(\"currency:\")\n",
+    "            + delete_extra_space\n",
+    "            + pynutil.delete(\"\\\"\")\n",
+    "            + pynini.closure(NEMO_NOT_QUOTE, 1)\n",
+    "            + pynutil.delete(\"\\\"\")\n",
+    "        )"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "bDS8XSII-Dpd"
+   },
+   "source": [
+    "Now consider what remains: we need to normalize an integer component, a fractional component, and a decimal marker to separate them. Since NeMo will automatically permute all tags, we can assume whatever order we want. As such, we can assume we get the exact order that is accepted by our `DecimalFst`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "VtGfpjVA-r3u"
+   },
+   "outputs": [],
+   "source": [
+    "    def __init__(self, decimal: GraphFst):\n",
+    "        super().__init__(name=\"money\", kind=\"verbalize\")\n",
+    "        unit = (\n",
+    "            pynutil.delete(\"currency:\")\n",
+    "            + delete_extra_space\n",
+    "            + pynutil.delete(\"\\\"\")\n",
+    "            + pynini.closure(NEMO_NOT_QUOTE, 1)\n",
+    "            + pynutil.delete(\"\\\"\")\n",
+    "        )\n",
+    "        graph = decimal.numbers + delete_space + unit\n",
+    "        delete_tokens = self.delete_tokens(graph)\n",
+    "        self.fst = delete_tokens.optimize()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "ZefxZLIU-uRU"
+   },
+   "source": [
+    "It is as simple and compact as appending the `unit` component to the preexisting `decimal.numbers`. \n",
+    "\n",
+    "This feature is worth keeping in mind as you build up to more concrete classes: the combination of guaranteed tag permutations and prebuilt Verbalizers makes the addition of semiotic classes progressively simpler despite the building complexity of your entire grammar."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "WydC7Cn28l5Y"
+   },
+   "source": [
+    "# Time WFST "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "VelunbumCJJe"
+   },
+   "source": [
+    "Our next composite graph will be for the Time WFST. Here, you may see more variation between your language and our example than with our previous classes. This is for a number of reasons, among them being that while there may be some standard cross-linguistic patterns regarding time (e.g. `quantity_of_hours + quantity_of_minutes`), the use of various equivalent phrases can make an exhaustive grammar incredibly specific (e.g. consider managing \"twelve fifteen\", \"twelve and a quarter\", \"quarter past twelve\", \"quarter after twelve\", and \"forty five until one\" all together). 
You may find yourself drawing upon WFSTs that accommodate Cardinals, Fractions, and some basic subtraction.\n",
+    "\n",
+    "As such, we are going to focus on those aspects of the Time WFST that are necessary for a functional normalization of time-related phrases, saving a more exhaustive grammar for your own specific languages and use cases."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "8wqb28wzATOR"
+   },
+   "source": [
+    "## Grammar"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "AVntDM3AEz0v"
+   },
+   "source": [
+    "For our Time WFST, we will focus on the following aspects:\n",
+    "- Use of a 24 or 12 hour base\n",
+    "- Use of fraction terminology (e.g. \"quarter\" = `15`)\n",
+    "- Accommodation of key-words (\"noon\", \"midnight\")\n",
+    "- Counting backwards from the hour (\"ten to five\", \"five to three\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "seU9hTbgFgu7"
+   },
+   "source": [
+    "We'll start with the basic system.\n",
+    "\n",
+    "For French, time operates on a twenty-four hour system, with the zeroth hour being midnight. Time is given in the following format:\n",
+    "\n",
+    "`cardinal + heure(s) + (cardinal)` \n",
+    "\n",
+    "This is normalized as:\n",
+    "\n",
+    "`cardinal h (cardinal)`\n",
+    "\n",
+    "For instance, for `3:03`, we would have:\n",
+    "- input: \"trois heures trois\"\n",
+    "- output: `3 h 03`\n",
+    "\n",
+    "As such, our grammar needs to utilize a Cardinal WFST and have a means to accept \"heures\" from the input. Taking care of the latter case is simple enough:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "HTSVxf4fI_ND"
+   },
+   "outputs": [],
+   "source": [
+    "graph_heures = pynini.accep(\"heure\") + pynini.accep(\"s\").ques\n",
+    "graph_heures = pynutil.delete(graph_heures)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "6LW7pXaXJSZa"
+   },
+   "source": [
+    "For the cardinals, we could pass an instance of `CardinalFST` to our graph. But do we really need that level of coverage? We only really need to cover the numbers 0 - 60, which we could simply write a new WFST for. Further, it may be beneficial to allow our graph to resolve possible ambiguity. While we will not cover it in our tutorial, you may in the future find it necessary to build a WFST for Measurements, of which quantities of time may play a part. Would it not be helpful for your WFST to know that \"thirty hours\" could only ever be a measurement instead of a possible time of day?\n",
+    "\n",
+    "Given the little amount of effort necessary and the quick benefit, we choose to make our hours and minutes explicit in the Time WFST."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "R4aa06ZPLKIR"
+   },
+   "outputs": [],
+   "source": [
+    "hours = pynini.string_map([\n",
+    "    (\"zéro\",\"0\"),\n",
+    "    (\"une\",\"1\"),\n",
+    "    (\"deux\",\"2\"),\n",
+    "    (\"trois\",\"3\"),\n",
+    "    (\"quatre\",\"4\"),\n",
+    "    (\"cinq\",\"5\"),\n",
+    "    (\"six\",\"6\"),\n",
+    "    (\"sept\",\"7\"),\n",
+    "    (\"huit\",\"8\"),\n",
+    "    (\"neuf\",\"9\"),\n",
+    "    (\"dix\",\"10\"),\n",
+    "    (\"onze\",\"11\"),\n",
+    "    (\"douze\",\"12\"),\n",
+    "    (\"treize\",\"13\"),\n",
+    "    (\"quatorze\",\"14\"),\n",
+    "    (\"quinze\",\"15\"),\n",
+    "    (\"seize\",\"16\"),\n",
+    "    (\"dix-sept\",\"17\"),\n",
+    "    (\"dix-huit\",\"18\"),\n",
+    "    (\"dix-neuf\",\"19\"),\n",
+    "    (\"vingt\",\"20\"),\n",
+    "    (\"vingt-et-une\",\"21\"),\n",
+    "    (\"vingt et une\",\"21\"),\n",
+    "    (\"vingt-deux\",\"22\"),\n",
+    "    (\"vingt-trois\",\"23\"),\n",
+    "    (\"vingt-quatre\",\"24\"),\n",
+    "])\n",
+    "minutes = pynini.string_map([\n",
+    "    (\"une\", \"01\"),\n",
+    "    (\"deux\", \"02\"),\n",
+    "    (\"trois\", \"03\"),\n",
+    "    (\"quatre\", \"04\"),\n",
+    "    (\"cinq\", \"05\"),\n",
+    "    (\"six\", \"06\"),\n",
+    "    (\"sept\", \"07\"),\n",
+    "    (\"huit\", \"08\"),\n",
+    "    (\"neuf\", \"09\"),\n",
+    "    (\"dix\", \"10\"),\n",
+    "    (\"onze\", \"11\"),\n",
+    "    (\"douze\", \"12\"),\n",
+    "    (\"treize\", \"13\"),\n",
+    "    (\"quatorze\", \"14\"),\n",
+    "    (\"quinze\", \"15\"),\n",
+    "    (\"seize\", \"16\"),\n",
+    "    (\"dix-sept\", \"17\"),\n",
+    "    (\"dix-huit\", \"18\"),\n",
+    "    (\"dix-neuf\", \"19\"),\n",
+    "    (\"vingt\", \"20\"),\n",
+    "    (\"vingt-et-une\", \"21\"),\n",
+    "    (\"vingt et une\", \"21\"),\n",
+    "    (\"vingt-deux\", \"22\"),\n",
+    "    (\"vingt-trois\", \"23\"),\n",
+    "    (\"vingt-quatre\", \"24\"),\n",
+    "    (\"vingt-cinq\", \"25\"),\n",
+    "    (\"vingt-six\", \"26\"),\n",
+    "    (\"vingt-sept\", \"27\"),\n",
+    "    (\"vingt-huit\", \"28\"),\n",
+    "    (\"vingt-neuf\", \"29\"),\n",
+    "    (\"trente\", \"30\"),\n",
+    "    (\"trente-et-une\", \"31\"),\n",
+    "    (\"trente et une\", \"31\"),\n",
+    "    (\"trente-deux\", \"32\"),\n",
+    "    (\"trente-trois\", \"33\"),\n",
+    "    (\"trente-quatre\", \"34\"),\n",
+    "    (\"trente-cinq\", \"35\"),\n",
+    "    (\"trente-six\", \"36\"),\n",
+    "    (\"trente-sept\", \"37\"),\n",
+    "    (\"trente-huit\", \"38\"),\n",
+    "    (\"trente-neuf\", \"39\"),\n",
+    "    (\"quarante\", \"40\"),\n",
+    "    (\"quarante-et-une\", \"41\"),\n",
+    "    (\"quarante et une\", \"41\"),\n",
+    "    (\"quarante-deux\", \"42\"),\n",
+    "    (\"quarante-trois\", \"43\"),\n",
+    "    (\"quarante-quatre\", \"44\"),\n",
+    "    (\"quarante-cinq\", \"45\"),\n",
+    "    (\"quarante-six\", \"46\"),\n",
+    "    (\"quarante-sept\", \"47\"),\n",
+    "    (\"quarante-huit\", \"48\"),\n",
+    "    (\"quarante-neuf\", \"49\"),\n",
+    "    (\"cinquante\", \"50\"),\n",
+    "    (\"cinquante-et-une\", \"51\"),\n",
+    "    (\"cinquante et une\", \"51\"),\n",
+    "    (\"cinquante-deux\", \"52\"),\n",
+    "    (\"cinquante-trois\", \"53\"),\n",
+    "    (\"cinquante-quatre\", \"54\"),\n",
+    "    (\"cinquante-cinq\", \"55\"),\n",
+    "    (\"cinquante-six\", \"56\"),\n",
+    "    (\"cinquante-sept\", \"57\"),\n",
+    "    (\"cinquante-huit\", \"58\"),\n",
+    "    (\"cinquante-neuf\", \"59\"),\n",
+    "])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "4SmNsNKLM9cC"
+   },
+   "source": [
+    "Now that we've managed the basic graph, we can address some of the more niche rules of French timekeeping.\n",
+    "\n",
+    "To start, French employs some colloquialisms that will be familiar to English speakers: minutes that are multiples of fifteen are referred to as fractions of a clock. 
In particular:\n", + "- `5 h 15` -> \"cinq heures **et quart**\"\n", + "- `5 h 30` -> \"cinq heures **et demie**\"\n", + "- `5 h 45` -> \"cinq heures **et trois quarts**\"\n", + "\n", + "We thus need a means of rendering these as their numerical equivalents:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "xHe3nfrpSlrE" + }, + "outputs": [], + "source": [ + "# Mapping 'et demi' and 'et quart'\n", + "graph_et = pynutil.delete(\"et\") + delete_space\n", + "\n", + "graph_demi = pynini.accep(\"demi\")\n", + "graph_demi += pynini.accep(\"e\").ques # people vary on feminine or masculine form\n", + "graph_demi = pynini.cross(graph_demi, \"30\")\n", + "\n", + "graph_quart = pynini.accep('quart')\n", + "graph_quart = pynini.cross(graph_quart, '15')\n", + "graph_trois_quart = pynini.cross(\"trois quarts\", \"45\")\n", + "\n", + "graph_fractions = graph_demi | graph_quart | graph_trois_quart\n", + "graph_fractions = graph_et + graph_fractions" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "HD2wobIQS3fX" + }, + "source": [ + "Also like English, French will use key words to designate a specific timeslot. Noon and midnight are \"midi\" and \"minuit\" respectively." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ahbkiZFuTN2t" + }, + "outputs": [], + "source": [ + "# Midi and minuit\n", + "graph_midi = pynini.cross(\"midi\", \"12\")\n", + "graph_minuit = pynini.cross(\"minuit\", \"0\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6OyMoqfZTX1U" + }, + "source": [ + "Now it's time to throw a wrench into things: counting backwards from the hour. How are we to get what is essentially a graph to do the subtraction necessary for \"ten to twelve\" to become `11:50`?\n", + "\n", + "Easy: we build the subtraction into the graph itself. That is, we map the hours and minutes produced by our graph onto another graph that produces those values shifted back accordingly.\n", + "\n", + "Let's take our \"ten to twelve\" example. Normally \"ten\" would map to `10` and \"twelve\" to `12`. But with these new graphs, the detection of the pattern `minute + to + hour` would signal that `10` should now become `50` and `12` become `11`." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "uMWifbm1VQjP" + }, + "source": [ + "Let us do this for our French example.
Luckily enough, the indication that a French string is counting backwards from the hour is regular: the use of the pattern `cardinal + heures + moins + minutes`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "c4bV3T1pViCH" + }, + "outputs": [], + "source": [ + "hours_to = pynini.string_map([\n", + "    (\"1\",\"0\"),\n", + "    (\"2\",\"1\"),\n", + "    (\"3\",\"2\"),\n", + "    (\"4\",\"3\"),\n", + "    (\"5\",\"4\"),\n", + "    (\"6\",\"5\"),\n", + "    (\"7\",\"6\"),\n", + "    (\"8\",\"7\"),\n", + "    (\"9\",\"8\"),\n", + "    (\"10\",\"9\"),\n", + "    (\"11\",\"10\"),\n", + "    (\"12\",\"11\"),\n", + "    (\"13\",\"12\"),\n", + "    (\"14\",\"13\"),\n", + "    (\"15\",\"14\"),\n", + "    (\"16\",\"15\"),\n", + "    (\"17\",\"16\"),\n", + "    (\"18\",\"17\"),\n", + "    (\"19\",\"18\"),\n", + "    (\"20\",\"19\"),\n", + "    (\"21\",\"20\"),\n", + "    (\"22\",\"21\"),\n", + "    (\"23\",\"22\"),\n", + "    (\"24\",\"23\"),\n", + "    (\"0\",\"23\"),\n", + "])\n", + "minutes_to = pynini.string_map([\n", + "    (\"59\", \"01\"),\n", + "    (\"58\", \"02\"),\n", + "    (\"57\", \"03\"),\n", + "    (\"56\", \"04\"),\n", + "    (\"55\", \"05\"),\n", + "    (\"54\", \"06\"),\n", + "    (\"53\", \"07\"),\n", + "    (\"52\", \"08\"),\n", + "    (\"51\", \"09\"),\n", + "    (\"50\", \"10\"),\n", + "    (\"49\", \"11\"),\n", + "    (\"48\", \"12\"),\n", + "    (\"47\", \"13\"),\n", + "    (\"46\", \"14\"),\n", + "    (\"45\", \"15\"),\n", + "    (\"44\", \"16\"),\n", + "    (\"43\", \"17\"),\n", + "    (\"42\", \"18\"),\n", + "    (\"41\", \"19\"),\n", + "    (\"40\", \"20\"),\n", + "    (\"39\", \"21\"),\n", + "    (\"38\", \"22\"),\n", + "    (\"37\", \"23\"),\n", + "    (\"36\", \"24\"),\n", + "    (\"35\", \"25\"),\n", + "    (\"34\", \"26\"),\n", + "    (\"33\", \"27\"),\n", + "    (\"32\", \"28\"),\n", + "    (\"31\", \"29\"),\n", + "    (\"30\", \"30\"),\n", + "    (\"29\", \"31\"),\n", + "    (\"28\", \"32\"),\n", + "    (\"27\", \"33\"),\n", + "    (\"26\", \"34\"),\n", + "    (\"25\", \"35\"),\n", + "    (\"24\", \"36\"),\n", + "    (\"23\", \"37\"),\n", + "    (\"22\", \"38\"),\n", + "    (\"21\", \"39\"),\n", + "    (\"20\", \"40\"),\n", + "    (\"19\", \"41\"),\n", + "    (\"18\", \"42\"),\n", + "    (\"17\", \"43\"),\n", + "    (\"16\", \"44\"),\n", + "    (\"15\", \"45\"),\n", + "    (\"14\", \"46\"),\n", + "    (\"13\", \"47\"),\n", + "    (\"12\", \"48\"),\n", + "    (\"11\", \"49\"),\n", + "    (\"10\", \"50\"),\n", + "    (\"09\", \"51\"),\n", + "    (\"08\", \"52\"),\n", + "    (\"07\", \"53\"),\n", + "    (\"06\", \"54\"),\n", + "    (\"05\", \"55\"),\n", + "    (\"04\", \"56\"),\n", + "    (\"03\", \"57\"),\n", + "    (\"02\", \"58\"),\n", + "    (\"01\", \"59\"),\n", + "])\n", + "graph_moins = pynutil.delete(\"moins\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "XOKETkIYZy5M" + }, + "source": [ + "Why graph the digits instead of the tokens themselves? Along with avoiding some minor repetition and making editing more apparent, it allows this subgraph to be ported to other languages - if so desired.\n", + "\n", + "Further, it helps us illustrate a helpful idea within this tutorial: as long as a pattern is regular and/or finite, it is no major issue to accommodate it in our graph, regardless of the mathematical or logical system it employs." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DJbFiD2fAUc5" + }, + "source": [ + "## Classifier" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "cK0SGXntaDkI" + }, + "source": [ + "Once again we place the grammar within the proper child class of `GraphFst`.
We also insert the proper tags for the `Time` class, which are:\n", + "- `hours`\n", + "- `minutes`\n", + "- `suffix` (explained within this section)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "9Eq5r-_VbBIg" + }, + "outputs": [], + "source": [ + "graph_hours_component = pynini.union(hours, graph_midi, graph_minuit)\n", + "graph_hours_component = pynutil.insert(\"hours: \\\"\") + graph_hours_component + pynutil.insert(\"\\\"\")\n", + "\n", + "graph_minutes_component = (\n", + "    pynutil.insert(\" minutes: \\\"\") + pynini.union(minutes, graph_fractions) + pynutil.insert(\"\\\"\")\n", + ") \n", + "graph_minutes_component = delete_space + graph_minutes_component\n", + "\n", + "graph_time_standard = (graph_hours_component + delete_space + graph_heures \n", + "                       + pynini.closure(graph_minutes_component, 0, 1))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2avfS3IacSiC" + }, + "source": [ + "We now set up the alternate graph that allows backwards counting. Note that this is triggered by the occurrence of \"moins\" between the hour and minute component." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "TmpwisOVcn0T" + }, + "outputs": [], + "source": [ + "graph_hours_to_component = hours | graph_midi | graph_minuit\n", + "graph_hours_to_component @= hours_to\n", + "graph_hours_to_component = pynutil.insert(\"hours: \\\"\") + graph_hours_to_component + pynutil.insert(\"\\\"\")\n", + "graph_hours_to_component = graph_hours_to_component + delete_space + graph_heures\n", + "\n", + "graph_minutes_to_component = (minutes | graph_demi | # No 'et' in fractions\n", + "                              (pynutil.delete(\"le \") + graph_quart) | graph_trois_quart)\n", + "graph_minutes_to_component @= minutes_to\n", + "graph_minutes_to_component = pynutil.insert(\" minutes: \\\"\") + graph_minutes_to_component + pynutil.insert(\"\\\"\")\n", + "\n", + "graph_time_to = graph_hours_to_component + delete_space + graph_moins + delete_space + graph_minutes_to_component" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "FkO4tRRfdQT4" + }, + "source": [ + "We now join it with our main component, allowing us to graph all times:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "0O0vUVizdU8c" + }, + "outputs": [], + "source": [ + "graph_time = graph_time_standard | graph_time_to" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "jbX4JV-LdY3Y" + }, + "source": [ + "Once again we throw a wrench into things with the `suffix` feature. As in the case of Ordinals and Decimals, key-words can play into our Time WFST. For French, this occurs with the words \"du matin\", \"de l'après-midi\", and \"du soir\". (Respectively: \"in the morning\", \"in the afternoon\", and \"in the evening\".) Much like in English, these phrases alter how we write down the time. But instead of indicating `a.m.` or `p.m.`, these indicate *what hour system is used*. For example:\n", + "- \"deux heures du matin\" -> `2 h` = `2:00 a.m.`\n", + "- \"deux heures de l'après-midi\" -> `14 h` = `2:00 p.m.`\n", + "\n", + "Only a twelve-hour system is used when these suffixes accompany the time. As such, our Classifier will need either to adjust the times, as in the case of counting backwards, or to pass the information to the Verbalizer so it can adjust. \n", + "\n", + "Since our Classifier is long enough as is, we will simply store this information in the `suffix` property and allow the Verbalizer to manage it."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "OqVa78zRgJw9" + }, + "outputs": [], + "source": [ + "graph_suffix_am = pynini.cross(\"du matin\", \"am\")\n", + "graph_suffix_pm = pynini.string_map([(\"de l'après-midi\", \"pm\"),(\"du soir\", \"pm\")])\n", + "\n", + "graph_suffix = graph_suffix_am | graph_suffix_pm  # each subgraph already emits its am/pm output\n", + "\n", + "graph_suffix_component = pynutil.insert(\" suffix: \\\"\") + graph_suffix + pynutil.insert(\"\\\"\")\n", + "graph_suffix_component = delete_space + graph_suffix_component\n", + "graph_suffix_component = pynini.closure(graph_suffix_component, 0, 1)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-LaJMIjUf1XR" + }, + "source": [ + "And we append to our graph:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "76myCFiggX3E" + }, + "outputs": [], + "source": [ + "class TimeFst(GraphFst):\n", + "    def __init__(self):\n", + "        super().__init__(name=\"time\", kind=\"classify\")\n", + "        \"\"\"grammar omitted for length\n", + "        ....\n", + "        ....\n", + "        ....\n", + "        \"\"\"\n", + "        graph_hours_component = pynini.union(hours, graph_midi, graph_minuit)\n", + "        graph_hours_component = pynutil.insert(\"hours: \\\"\") + graph_hours_component + pynutil.insert(\"\\\"\")\n", + "\n", + "        graph_minutes_component = (\n", + "            pynutil.insert(\" minutes: \\\"\") + pynini.union(minutes, graph_fractions) + pynutil.insert(\"\\\"\")\n", + "        ) \n", + "        graph_minutes_component = delete_space + graph_minutes_component\n", + "\n", + "        graph_time_standard = (graph_hours_component + delete_space + graph_heures \n", + "                               + pynini.closure(graph_minutes_component, 0, 1))\n", + "\n", + "        graph_hours_to_component = hours | graph_midi | graph_minuit\n", + "        graph_hours_to_component @= hours_to\n", + "        graph_hours_to_component = pynutil.insert(\"hours: \\\"\") + graph_hours_to_component + pynutil.insert(\"\\\"\")\n", + "        graph_hours_to_component = graph_hours_to_component + delete_space + graph_heures\n", + "\n", + "        graph_minutes_to_component = (minutes | graph_demi | # No 'et' in fractions\n", + "                                      (pynutil.delete(\"le \") + graph_quart) | graph_trois_quart)\n", + "        graph_minutes_to_component @= minutes_to\n", + "        graph_minutes_to_component = pynutil.insert(\" minutes: \\\"\") + graph_minutes_to_component + pynutil.insert(\"\\\"\")\n", + "\n", + "        graph_time_to = graph_hours_to_component + delete_space + graph_moins + delete_space + graph_minutes_to_component\n", + "\n", + "        graph_time_no_suffix = graph_time_standard | graph_time_to\n", + "\n", + "        graph_suffix_am = pynini.cross(\"du matin\", \"am\")\n", + "        graph_suffix_pm = pynini.string_map([(\"de l'après-midi\", \"pm\"),(\"du soir\", \"pm\")])\n", + "\n", + "        graph_suffix = graph_suffix_am | graph_suffix_pm\n", + "\n", + "        graph_suffix_component = pynutil.insert(\" suffix: \\\"\") + graph_suffix + pynutil.insert(\"\\\"\")\n", + "        graph_suffix_component = delete_space + graph_suffix_component\n", + "        graph_suffix_component = pynini.closure(graph_suffix_component, 0, 1)\n", + "        \n", + "        final_graph = graph_time_no_suffix + graph_suffix_component\n", + "\n", + "        final_graph = self.add_tokens(final_graph)\n", + "\n", + "        self.fst = final_graph.optimize()\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's see how we did:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "time = 
TimeFst().fst\n", + "example = \"quatre heures moins cinq\"\n", + "apply_fst(example, time)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "lPlJ1qyeAWOL" + }, + "source": [ + "## Verbalizer" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "CrO-xtJ87PEl" + }, + "source": [ + "The initial part of the Verbalizer should appear familiar. We delete the property tags `hours` and `minutes`, making sure to preserve the actual values for formatting." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "fCzZKR7ek0Mz" + }, + "outputs": [], + "source": [ + "hour = (\n", + "    pynutil.delete(\"hours:\")\n", + "    + delete_space\n", + "    + pynutil.delete(\"\\\"\")\n", + "    + pynini.closure(NEMO_DIGIT, 1, 2)\n", + "    + pynutil.delete(\"\\\"\")\n", + ")\n", + "minute = (\n", + "    pynutil.delete(\"minutes:\")\n", + "    + delete_extra_space\n", + "    + pynutil.delete(\"\\\"\")\n", + "    + pynini.closure(NEMO_DIGIT, 1, 2)\n", + "    + pynutil.delete(\"\\\"\")\n", + ")\n", + "graph = hour + delete_extra_space + pynutil.insert(\"h\") + minute.ques" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "WnVV9GUKk-b7" + }, + "source": [ + "We then deal with the case of `suffix`. We first note that if the suffix is for a morning time (before noon), then no further conversion is needed. We may simply delete the property and its value." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "haOEiSbglc6s" + }, + "outputs": [], + "source": [ + "day_suffixes = pynutil.delete(\"suffix: \\\"am\\\"\")\n", + "\n", + "graph = hour + delete_extra_space + pynutil.insert(\"h\") + minute.ques + delete_space + day_suffixes.ques" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "wL0FNg6Xlhb-" + }, + "source": [ + "Meanwhile, the post-noon suffixes would require us to shift the hours value by twelve. Much like in the case of counting backwards from the hour, we can simply create a WFST to do this addition work for us."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "YLrabUNplwG7" + }, + "outputs": [], + "source": [ + "hour_to_night = pynini.string_map([\n", + "    (\"1\", \"13\"),\n", + "    (\"2\", \"14\"),\n", + "    (\"3\", \"15\"),\n", + "    (\"4\", \"16\"),\n", + "    (\"5\", \"17\"),\n", + "    (\"6\", \"18\"),\n", + "    (\"7\", \"19\"),\n", + "    (\"8\", \"20\"),\n", + "    (\"9\", \"21\"),\n", + "    (\"10\", \"22\"),\n", + "    (\"11\", \"23\"), # Note that 12 and 24 would be phrased \"midi\" and \"minuit\" respectively\n", + "])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "X0-z-qJAmIiI" + }, + "source": [ + "We then create an alternate graph where this conversion is applied to the hours component - given a post-noon suffix - and create a union with our earlier graph:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "8CdEmo9NmN7u" + }, + "outputs": [], + "source": [ + "night_suffixes = pynutil.delete(\"suffix: \\\"pm\\\"\")\n", + "graph |= (\n", + "    hour @ hour_to_night\n", + "    + delete_extra_space\n", + "    + pynutil.insert(\"h\")\n", + "    + minute.ques\n", + "    + delete_space\n", + "    + night_suffixes\n", + "    )" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "YnoIkZBqmaTo" + }, + "source": [ + "Giving us a final Verbalizer of:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ZfXimvFBmdDD" + }, + "outputs": [], + "source": [ + "class TimeFst(GraphFst):\n", + "    def __init__(self):\n", + "        super().__init__(name=\"time\", kind=\"verbalize\")\n", + "\n", + "        hour_to_night = pynini.string_map([\n", + "            (\"1\", \"13\"),\n", + "            (\"2\", \"14\"),\n", + "            (\"3\", \"15\"),\n", + "            (\"4\", \"16\"),\n", + "            (\"5\", \"17\"),\n", + "            (\"6\", \"18\"),\n", + "            (\"7\", \"19\"),\n", + "            (\"8\", \"20\"),\n", + "            (\"9\", \"21\"),\n", + "            (\"10\", \"22\"),\n", + "            (\"11\", \"23\"),\n", + "        ])\n", + "\n", + "        day_suffixes = pynutil.delete(\"suffix: \\\"am\\\"\")\n", + "        night_suffixes = pynutil.delete(\"suffix: \\\"pm\\\"\")\n", + "\n", + "        hour = (\n", + "            pynutil.delete(\"hours:\")\n", + "            + delete_space\n", + "            + pynutil.delete(\"\\\"\")\n", + "            + pynini.closure(NEMO_DIGIT, 1, 2)\n", + "            + pynutil.delete(\"\\\"\")\n", + "        )\n", + "        minute = (\n", + "            pynutil.delete(\"minutes:\")\n", + "            + delete_extra_space\n", + "            + pynutil.delete(\"\\\"\")\n", + "            + pynini.closure(NEMO_DIGIT, 1, 2)\n", + "            + pynutil.delete(\"\\\"\")\n", + "        )\n", + "\n", + "        graph = hour + delete_extra_space + pynutil.insert(\"h\") + minute.ques + delete_space + day_suffixes.ques\n", + "\n", + "        graph |= (\n", + "            hour @ hour_to_night\n", + "            + delete_extra_space\n", + "            + pynutil.insert(\"h\")\n", + "            + minute.ques\n", + "            + delete_space\n", + "            + night_suffixes\n", + "        )\n", + "        delete_tokens = self.delete_tokens(graph)\n", + "        self.fst = delete_tokens.optimize()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "e5tPcCaSYuhY" + }, + "source": [ + "As you may have noticed, the Verbalizer process has become simpler as we've progressed through our WFSTs. You will seldom need to provide even the amount of overhead we've seen in `TimeFst`, `MoneyFst`, and `OrdinalFst`, and the majority of this component is simply removing tokens as an intermediary step, as we'll see for our Name class."
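+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As a quick sanity check of the Verbalizer, we can feed it a pre-tagged string. The following is a sketch, assuming the `apply_fst` helper from earlier in this tutorial; depending on how `delete_tokens` is implemented, you may need to wrap the properties in `time { ... }`:\n", + "\n", + "```Python\n", + "time_verbalizer = TimeFst().fst  # the verbalize-kind TimeFst defined above\n", + "\n", + "# The pm suffix shifts the hour from 2 to 14 before being deleted;\n", + "# expected output, roughly: 14 h 15\n", + "apply_fst('hours: \"2\" minutes: \"15\" suffix: \"pm\"', time_verbalizer)\n", + "```"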
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "iHmRe3UIhyIH" + }, + "source": [ + "# WhiteList WFST " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8kMn2qB9bVFy" + }, + "source": [ + "\n", + "While developing your grammars, you may encounter tokens that refuse standard categorization and yet still require normalization. For example, you may need to render \"Mister Brown\" as `Mr. Brown` or \"H M S Nelson\" as `H.M.S. Nelson`. As these cases are rather specific, they lack a regular pattern for a specific classifier. (What about \"mister\" as a token requires normalization, as opposed to \"Brown\"?) Instead, we need to explicitly list their input-output mappings (i.e. a whitelist).\n", + "\n", + "For NeMo, this is performed through the `WhiteListFst`:\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6B4oPXYcccWs" + }, + "source": [ + "## Grammar" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "RThTLUCRceOO" + }, + "source": [ + "`WhiteListFst` is essentially just a wrapper for a `string_map` or `string_file` mapping with the appropriate formatting for deployment. Per our example, we can make a graph with the following:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "eIOOb_wJdMMx" + }, + "outputs": [], + "source": [ + "graph = pynini.string_map([\n", + "    (\"mister\", \"mr.\"),\n", + "    (\"h m s\", \"h.m.s.\"),\n", + "    (\"doctor\", \"dr.\")\n", + "])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "O5kTXwmPZ9Tt" + }, + "source": [ + "As previously mentioned, here is where the use of `string_file` will make maintenance much easier. Discovering whitelist mappings is an iterative process and you will more than likely need to return to your list throughout development. For instance, it may be obvious to include tokens such as \"madame\", \"miss\", and \"esquire\", but would you think of providing abbreviations for \"the right honorable\" or \"tennessee valley authority\"? Keeping a TSV file available for quick insertions greatly assists here." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "RC5Cf-Z8dYVk" + }, + "source": [ + "## Classifier" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "144nvAHEdfBJ" + }, + "source": [ + "Unlike our other WFSTs, there is no specific semiotic class for `WhiteListFst`. It instead falls under the default Name class to designate that there is no need for further processing beyond obligatory tokenization. Indeed, we can simply insert the token ourselves instead of calling `add_tokens`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "oPkrmg2gdznd" + }, + "outputs": [], + "source": [ + "class WhiteListFst(GraphFst):\n", + "    def __init__(self):\n", + "        super().__init__(name=\"whitelist\", kind=\"classify\")\n", + "\n", + "        whitelist = pynini.string_map([\n", + "            (\"mister\", \"mr.\"),\n", + "            (\"h m s\", \"h.m.s.\"),\n", + "            (\"doctor\", \"dr.\")])\n", + "        graph = pynutil.insert(\"name: \\\"\") + convert_space(whitelist) + pynutil.insert(\"\\\"\")\n", + "        self.fst = graph.optimize()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "B05kdSIdd2dv" + }, + "source": [ + "## Verbalizer" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Since the whitelisted token has already been rendered in the desired normalized form, all that is necessary is to strip the `name` token and render the string 'as is'.
This can be done through the following:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "gaq3voIYiUCA" + }, + "outputs": [], + "source": [ + "class WhiteListFst(GraphFst):\n", + "    def __init__(self):\n", + "        super().__init__(name=\"whitelist\", kind=\"verbalize\")\n", + "        graph = (\n", + "            pynutil.delete(\"name:\")\n", + "            + delete_space\n", + "            + pynutil.delete(\"\\\"\")\n", + "            + pynini.closure(NEMO_CHAR - \" \", 1)\n", + "            + pynutil.delete(\"\\\"\")\n", + "        )\n", + "        graph = graph @ pynini.cdrewrite(pynini.cross(u\"\\u00A0\", \" \"), \"\", \"\", NEMO_SIGMA) # Replaces possible non-breaking space with a regular space\n", + "        self.fst = graph.optimize()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "cUE7Gg35bWKb" + }, + "source": [ + "While the graph is largely self-explanatory, take note that the default implementation assumes a character string without spacing. If you intend to include additional formatting in your normalization (e.g. `H. M. S.` instead of `H.M.S.`), you may need to adjust the graph to expand coverage." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "_o_a15Fg7niv" + }, + "source": [ + "# Word and Punctuation WFST " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Zi6lP7mTmnUV" + }, + "source": [ + "Continuing with the Name class, we will conclude with the Word and Punctuation WFSTs. These are among the simplest and most crucial classes of the entire ITN system, as they classify all tokens that are not caught by other semiotic classes. Since these other tokens make up the majority of all strings your normalization system will encounter, they are essential for general functionality.\n", + "\n", + "However, they escape discussion as their function is self-evident: since they function as default classes, tokens only reach the Word and Punctuation WFSTs if they have not been accepted by the other WFSTs. As such, we can simply accept the tokens as they are, providing them a `name` tag." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "9zCqczLqp5NW" + }, + "source": [ + "## Classifier" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "eUWum5U0p99c" + }, + "source": [ + "For instance, consider the `WordFst` Classifier in its entirety:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "CCZSTeDHofDl" + }, + "outputs": [], + "source": [ + "class WordFst(GraphFst):\n", + "    def __init__(self):\n", + "        super().__init__(name=\"word\", kind=\"classify\")\n", + "        word = pynutil.insert(\"name: \\\"\") + pynini.closure(NEMO_NOT_SPACE, 1) + pynutil.insert(\"\\\"\")\n", + "        self.fst = word.optimize()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "9ys2VpjjoiEC" + }, + "source": [ + "It just processes the entire token string with the `NEMO_NOT_SPACE` utility WFST (which accepts any string that is not a space). For your language, you may simply use one of the preexisting `WordFst` implementations.\n", + "\n", + "Depending on language, the `PunctuationFst` may require some (minimal) adjustment.
Note the following:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Mnnd3PVMpF4t" + }, + "outputs": [], + "source": [ + "class PunctuationFst(GraphFst):\n", + "    def __init__(self):\n", + "        super().__init__(name=\"punctuation\", kind=\"classify\")\n", + "\n", + "        s = \"!#$%&\\'()*+,-./:;<=>?@^_`{|}~\"\n", + "        punct = pynini.union(*s)\n", + "\n", + "        graph = pynutil.insert(\"name: \\\"\") + punct + pynutil.insert(\"\\\"\")\n", + "\n", + "        self.fst = graph.optimize()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "_afW02LXpLtz" + }, + "source": [ + "If your language uses other punctuation than that in the `s` string (or reserves some of these symbols as regular characters), you may simply edit `s` to accommodate. \n", + "\n", + "For instance, French has a unique quotation style that utilizes guillemets \"« »\". We may add their Unicode codepoints (to avoid encoding issues) to `s`:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "mgfZIKzVplVm" + }, + "outputs": [], + "source": [ + "class PunctuationFst(GraphFst):\n", + "    def __init__(self):\n", + "        super().__init__(name=\"punctuation\", kind=\"classify\")\n", + "\n", + "        s = \"!#$%&\\'()*+,-./:;<=>?@^_`{|}~\"\n", + "        guillemets = \"\\u00AB\" + \"\\u00BB\" # quotation marks in French.\n", + "        s += guillemets\n", + "        punct = pynini.union(*s)\n", + "\n", + "        graph = pynutil.insert(\"name: \\\"\") + punct + pynutil.insert(\"\\\"\")\n", + "\n", + "        self.fst = graph.optimize()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6Upb5-wcp_7H" + }, + "source": [ + "## Verbalizer" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ufWT1T6GqCCT" + }, + "source": [ + "Note that `PunctuationFst` and `WordFst` both encode with the `name` property. This leaves no differentiation between the two for a Verbalizer. This makes sense, as there are no particular formatting rules for them; they simply need a placeholder tag to avoid alteration between the Classifier and Verbalizer steps. Once passed to the Verbalizer, they are rendered as normal by simply removing the tag (this is practically identical to the `WhiteListFst`):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "LqyhqQKZqcph" + }, + "outputs": [], + "source": [ + "class WordFst(GraphFst):\n", + "    def __init__(self):\n", + "        super().__init__(name=\"word\", kind=\"verbalize\")\n", + "        chars = pynini.closure(NEMO_CHAR - \" \", 1)\n", + "        char = pynutil.delete(\"name:\") + delete_space + pynutil.delete(\"\\\"\") + chars + pynutil.delete(\"\\\"\")\n", + "        graph = char @ pynini.cdrewrite(pynini.cross(u\"\\u00A0\", \" \"), \"\", \"\", NEMO_SIGMA) # Replaces possible non-breaking space with a regular space\n", + "\n", + "        self.fst = graph.optimize()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "lGbrUkcpapyi" + }, + "source": [ + "For many languages, the writing of your `WordFst` and `PunctuationFst` (both Classifiers and Verbalizers) will require no more than duplicating the preexisting grammars found in NeMo Text Processing."
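+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As a final check that this pair behaves as a simple pass-through, we can run the Verbalizer on a tagged token. A sketch, again assuming the `apply_fst` helper from earlier in the tutorial:\n", + "\n", + "```Python\n", + "word_verbalizer = WordFst().fst  # the verbalize-kind WordFst defined above\n", + "\n", + "# The Classifier wraps a stray token as name: \"...\"; the Verbalizer strips it back off.\n", + "apply_fst('name: \"bonjour\"', word_verbalizer)  # expected output: bonjour\n", + "```"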
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5y9jhkhQ7p4W" + }, + "source": [ + "# Other Classes " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "j1mgnISmiu-g" + }, + "source": [ + "While the preceding discussion should be suitable for development of the remaining classes, some helpful notes may be of use before continuing:\n", + "- Fraction WFST: This is the last of the 'fundamental' classes and should take priority after completion of the Decimal WFST. It operates very similarly to the Ordinal WFST in that you wish to recover the Cardinal roots for the numerator and denominator prior to tagging. Its properties are: `negative`, `integer_part`, `numerator`, and `denominator`.\n", + "- Measure WFST: Like the Money WFST, this will require management of several 'parent' WFSTs (Fraction, Cardinal, Decimal) to be suitably comprehensive. As well, you may find it more productive to find ways to compose new measurement units instead of simply listing them all (e.g. micrometers, petameters, miles per hour, feet per second). Its properties are `negative` and `units`, and it allows subgraphs of the `cardinal`, `decimal`, and `fraction` classes. (That is, it allows tokenization within the tokenization.)\n", + "- Date WFST: Depending on writing conventions, this may vary in complexity. For instance, English speakers may write dates as `01/01/2021` or `Jan. 1 2021`. Are there specific use cases where one is preferred or should you simply decide on a format? Further, you may wish to take advantage of the `preserve order` property to avoid possible unwanted verbalizations (some implementations will permit both `Jan. 1` and `1 Jan.` if one is not careful). Its properties are: `month`, `day`, and `year`. \n", + "- Telephone WFST: These will be heavily dependent not only on writing conventions but also on regional preference. For instance, the U.S. commonly uses a ten digit system broken into the following sequence: `###-###-####`. Meanwhile, mainland France breaks a ten digit sequence into groups of two: `##-##-##-##-##`. Take careful note of how your language's target region verbalizes these figures and leave room for some variation in development. The `telephone` class has only one property: `number_part`. \n", + "- Electronic WFST: For normalizing email addresses or urls, you will need to develop for the `electronic` class. The main concerns will be managing alphanumeric strings and parsing the reserved symbols used for protocols and domains. (How does your target language pronounce \"https://\", \"www\", '.', or '@'?) Depending on whether you are normalizing a url or email, the following properties will be needed:\n", + "  - email: `username`, `domain`\n", + "  - url: `protocol` (Sparrowhawk allows further detail here but NeMo passes the entire url through the `protocol` property)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-i25X8mK90n3" + }, + "source": [ + "# Tokenize and Classify " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "v4bcigU6b9ss" + }, + "source": [ + "We are now ready to build a general Classifier for our entire language. Upon completion of your grammars, the next step is to unite them in a general Classifier WFST - located within a `tokenize_and_classify.py` file, preferably.
This WFST will be responsible for determining the appropriate semiotic class for each token in your string and processing the necessary properties for normalization.\n", + "\n", + "For this section, we will focus on the following: grammar composition, assignment of weights, and importing/exporting as a FAR file. Since we will need to work with some instantiated graphs, let's preload them before proceeding. (Note the compile time.)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from nemo_text_processing.inverse_text_normalization.fr.taggers.cardinal import CardinalFst\n", + "from nemo_text_processing.inverse_text_normalization.fr.taggers.decimal import DecimalFst\n", + "from nemo_text_processing.inverse_text_normalization.fr.taggers.money import MoneyFst\n", + "from nemo_text_processing.inverse_text_normalization.fr.taggers.ordinal import OrdinalFst\n", + "from nemo_text_processing.inverse_text_normalization.fr.taggers.punctuation import PunctuationFst\n", + "from nemo_text_processing.inverse_text_normalization.fr.taggers.time import TimeFst\n", + "from nemo_text_processing.inverse_text_normalization.fr.taggers.whitelist import WhiteListFst\n", + "from nemo_text_processing.inverse_text_normalization.fr.taggers.word import WordFst\n", + "\n", + "cardinal = CardinalFst()\n", + "cardinal_graph = cardinal.fst\n", + "\n", + "ordinal = OrdinalFst(cardinal)\n", + "ordinal_graph = ordinal.fst\n", + "\n", + "decimal = DecimalFst(cardinal)\n", + "decimal_graph = decimal.fst\n", + "\n", + "whitelist_graph = WhiteListFst().fst\n", + "word_graph = WordFst().fst\n", + "time_graph = TimeFst().fst\n", + "money_graph = MoneyFst(cardinal, decimal).fst\n", + "punct_graph = PunctuationFst().fst" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "MIv58eSocOV1" + }, + "source": [ + "## Grammar" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "k_RPlnfVdG5E" + }, + "source": [ + "As for all previous grammars, the `tokenize_and_classify` grammar inherits from a `GraphFst` as an individual class: `ClassifyFst`. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "WHKG4c2WdW0G" + }, + "outputs": [], + "source": [ + "class ClassifyFst(GraphFst):\n", + "    def __init__(self):\n", + "        super().__init__(name=\"tokenize_and_classify\", kind=\"classify\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "j9_I6DJmdcOG" + }, + "source": [ + "This class is responsible for instantiating all subgraphs and passing necessary dependencies:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "4YtmcxLOdlas" + }, + "outputs": [], + "source": [ + "class ClassifyFst(GraphFst):\n", + "    def __init__(self):\n", + "        super().__init__(name=\"tokenize_and_classify\", kind=\"classify\")\n", + "\n", + "        cardinal = CardinalFst()\n", + "        cardinal_graph = cardinal.fst\n", + "\n", + "        ordinal = OrdinalFst(cardinal)\n", + "        ordinal_graph = ordinal.fst\n", + "\n", + "        decimal = DecimalFst(cardinal)\n", + "        decimal_graph = decimal.fst\n", + "\n", + "        whitelist_graph = WhiteListFst().fst\n", + "        word_graph = WordFst().fst\n", + "        time_graph = TimeFst().fst\n", + "        money_graph = MoneyFst(cardinal, decimal).fst\n", + "        punct_graph = PunctuationFst().fst" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "y5vGvv3HeAY9" + }, + "source": [ + "We then join all the grammars together so `ClassifyFst` can apply them.
Rather unceremoniously, this is accomplished by performing a union across all grammars (excluding `PunctuationFst`, to assist tokenization). We then follow this union by inserting the `tokens` class around the resulting formatting (required for processing):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "oocgPQ5geZJO" + }, + "outputs": [], + "source": [ + "class ClassifyFst(GraphFst):\n", + "    def __init__(self):\n", + "        super().__init__(name=\"tokenize_and_classify\", kind=\"classify\")\n", + "\n", + "        cardinal = CardinalFst()\n", + "        cardinal_graph = cardinal.fst\n", + "\n", + "        ordinal = OrdinalFst(cardinal)\n", + "        ordinal_graph = ordinal.fst\n", + "\n", + "        decimal = DecimalFst(cardinal)\n", + "        decimal_graph = decimal.fst\n", + "\n", + "        whitelist_graph = WhiteListFst().fst\n", + "        word_graph = WordFst().fst\n", + "        time_graph = TimeFst().fst\n", + "        money_graph = MoneyFst(cardinal, decimal).fst\n", + "        punct_graph = PunctuationFst().fst\n", + "\n", + "        classify = (\n", + "            time_graph\n", + "            | whitelist_graph\n", + "            | decimal_graph\n", + "            | cardinal_graph\n", + "            | ordinal_graph\n", + "            | money_graph\n", + "            | word_graph\n", + "        )\n", + "        token = pynutil.insert(\"tokens { \") + classify + pynutil.insert(\" }\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ASWDXWQjfLEU" + }, + "source": [ + "Our graph is now able to process an individual token. But what about a string? Here you will need to be mindful of the tokenization behavior for your language and decide on your desired treatment of punctuation (hence its exclusion from the main graph). \n", + "\n", + "For our purposes, we will assume the convention of space and punctuation serving as token separators. We graph punctuation as individual tokens:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "r6WztK2jwhFt" + }, + "outputs": [], + "source": [ + "punct_graph = PunctuationFst().fst\n", + "punct = pynutil.insert(\"tokens { \") + punct_graph + pynutil.insert(\" }\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "9T2rT89jw3T1" + }, + "source": [ + "and join the `punct` graph with our `tokens` graph (inserting spaces between tokens for formatting):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "rGtVOK-txKOP" + }, + "outputs": [], + "source": [ + "token = \"PLACEHOLDER\"  # stand-in for the token graph built above\n", + "token_plus_punct = (\n", + "    pynini.closure(punct + pynutil.insert(\" \")) + token + pynini.closure(pynutil.insert(\" \") + punct)\n", + "    )  # Note the use of closure in case there are multiple punctuation marks\n", + "graph = token_plus_punct + pynini.closure(delete_extra_space + token_plus_punct)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "_gixfQ69xWPe" + }, + "source": [ + "and then address the space between tokens: \n", + "\n", + "`graph = delete_space + graph + delete_space`" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DWnmazWecyUG" + }, + "source": [ + "## Weighting " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "egHbwIbMx-hT" + }, + "source": [ + "Were we to leave our `ClassifyFst` like this, we would undoubtedly encounter a mountain of errors. What will stop our graph from treating punctuation that is part of a previous grammar as a token separator (e.g. \"vingt-et-un\")?
How do we ensure that a currency string isn't treated as solely a decimal string with a `name` token following?\n", + "\n", + "As in previous cases, the solution lies in our choice of weights for the grammar." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "y3U7_M8CyxZ1" + }, + "source": [ + "Let us return to the main graph:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "9VXe1dfsy3Be" + }, + "outputs": [], + "source": [ + "classify = (\n", + "    time_graph\n", + "    | whitelist_graph\n", + "    | decimal_graph\n", + "    | cardinal_graph\n", + "    | ordinal_graph\n", + "    | money_graph\n", + "    | word_graph\n", + "    )\n", + "punct = pynutil.insert(\"tokens { \") + punct_graph + pynutil.insert(\" }\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "aY4vOFqxy5ua" + }, + "source": [ + "Beyond the path weights that we explicitly added, these graphs are currently weightless. Since we want the graphs themselves to be the general determiners of a path, let us use some default weights an order of magnitude beyond our path weights (we use `pynutil.add_weight`):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "bthyt_Le2rsA" + }, + "outputs": [], + "source": [ + "classify = (\n", + "    pynutil.add_weight(time_graph, 1)\n", + "    | pynutil.add_weight(whitelist_graph, 1)\n", + "    | pynutil.add_weight(decimal_graph, 1)\n", + "    | pynutil.add_weight(cardinal_graph, 1)\n", + "    | pynutil.add_weight(ordinal_graph, 1)\n", + "    | pynutil.add_weight(money_graph, 1)\n", + "    | pynutil.add_weight(word_graph, 1)\n", + "    )\n", + "punct = pynutil.insert(\"tokens { \") + pynutil.add_weight(punct_graph, 1) + pynutil.insert(\" }\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xMNIJbzj3MMP" + }, + "source": [ + "Let's see what logical adjustments should be made. First off, we know that we want each class token to span the largest string possible. (e.g. We don't want \"quatre-vingt\" to be rendered as two `cardinal` classes with a hyphen in between.) As such, we want to penalize our graph for using more than one token. We can do so by establishing the following constraint: the combined weight of two or more tokens cannot be less than the weight of a single token. That is, for any pair of token weights `w_1` and `w_2`, their sum must always exceed a bound `k` that is no less than any individual token weight `w`:\n", + "\n", + "`w_1 + w_2 > k >= w`\n", + "\n", + "To keep things simple, let us make the upper limit `2`. This means we should increase all the weights to satisfy our constraint:\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "classify = (\n", + "    pynutil.add_weight(time_graph, 1.1)\n", + "    | pynutil.add_weight(whitelist_graph, 1.1)\n", + "    | pynutil.add_weight(decimal_graph, 1.1)\n", + "    | pynutil.add_weight(cardinal_graph, 1.1)\n", + "    | pynutil.add_weight(ordinal_graph, 1.1)\n", + "    | pynutil.add_weight(money_graph, 1.1)\n", + "    | pynutil.add_weight(word_graph, 1.1)\n", + "    )\n", + "punct = pynutil.insert(\"tokens { \") + pynutil.add_weight(punct_graph, 1.1) + pynutil.insert(\" }\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Do we want this constraint to include all tokens? Imagine if we had a string of multiple semiotic tokens in a row. Since this string's combined weight would be larger than that of any single class token, a grammar that served as a universal acceptor (i.e.
`word_graph`) would be preferred over these individual classes. This would be obviously incorrect. As such, we want to make sure that `word_graph` would only be traversed when there is truly no other option:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "qc_CU2ro63eg" + }, + "outputs": [], + "source": [ + "classify = (\n", + "    pynutil.add_weight(time_graph, 1.1)\n", + "    | pynutil.add_weight(whitelist_graph, 1.1)\n", + "    | pynutil.add_weight(decimal_graph, 1.1)\n", + "    | pynutil.add_weight(cardinal_graph, 1.1)\n", + "    | pynutil.add_weight(ordinal_graph, 1.1)\n", + "    | pynutil.add_weight(money_graph, 1.1)\n", + "    | pynutil.add_weight(word_graph, 100)\n", + "    )\n", + "punct = pynutil.insert(\"tokens { \") + pynutil.add_weight(punct_graph, 1.1) + pynutil.insert(\" }\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, even with a string of fifty different class tokens, `word_graph` would still not be considered as a path to traverse." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "fW8C3vD-7Dbl" + }, + "source": [ + "Next, let us consider our foundational graph: `cardinal_graph`. As Cardinals occur in practically all our WFSTs, it's possible for `cardinal_graph` to apply in almost all cases. Yet, we've specifically invoked `CardinalFst` whenever it was required in the other classes, so it will never be needed in any of those cases. This means that we want all those graphs to have *priority* over `cardinal_graph`. As such, we will increase its weight so it takes second-lowest precedence (while still paying attention to the combined weight constraint). " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "97UwGaEn8pj7" + }, + "outputs": [], + "source": [ + "classify = (\n", + "    pynutil.add_weight(time_graph, 1.1)\n", + "    | pynutil.add_weight(whitelist_graph, 1.1)\n", + "    | pynutil.add_weight(decimal_graph, 1.1)\n", + "    | pynutil.add_weight(cardinal_graph, 1.2)\n", + "    | pynutil.add_weight(ordinal_graph, 1.1)\n", + "    | pynutil.add_weight(money_graph, 1.1)\n", + "    | pynutil.add_weight(word_graph, 100)\n", + "    )\n", + "punct = pynutil.insert(\"tokens { \") + pynutil.add_weight(punct_graph, 1.1) + pynutil.insert(\" }\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0d9Lw4Ot88_B" + }, + "source": [ + "This form of thinking can be applied to all the 'foundational' graphs you may develop: the dependent graphs should take higher precedence than the graphs they borrow from. For instance, since `money_graph` utilizes `decimal_graph`, we know it should take precedence. However, since `decimal_graph` borrows from `cardinal_graph`, its weight must still be less than `1.2`.
As such: " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "-wF8cgLK9tpU" + }, + "outputs": [], + "source": [ + "classify = (\n", + "    pynutil.add_weight(time_graph, 1.1)\n", + "    | pynutil.add_weight(whitelist_graph, 1.1)\n", + "    | pynutil.add_weight(decimal_graph, 1.1)\n", + "    | pynutil.add_weight(cardinal_graph, 1.2)\n", + "    | pynutil.add_weight(ordinal_graph, 1.1)\n", + "    | pynutil.add_weight(money_graph, 1.09)\n", + "    | pynutil.add_weight(word_graph, 100)\n", + "    )\n", + "punct = pynutil.insert(\"tokens { \") + pynutil.add_weight(punct_graph, 1.1) + pynutil.insert(\" }\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "huMzDoZ2-FD2" + }, + "source": [ + "For those classes that don't seem affected, we can set their weights the same as the graphs below their 'foundation' graphs, simply to prevent prioritization when it is not required.\n", + "\n", + "Meanwhile, `whitelist_graph` should take precedence over all others, as it may contain unique normalizations that may get accidentally caught by the other graphs." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "gWG6ttyd-bbD" + }, + "outputs": [], + "source": [ + "classify = (\n", + "    pynutil.add_weight(time_graph, 1.1)\n", + "    | pynutil.add_weight(whitelist_graph, 1.07)\n", + "    | pynutil.add_weight(decimal_graph, 1.1)\n", + "    | pynutil.add_weight(cardinal_graph, 1.2)\n", + "    | pynutil.add_weight(ordinal_graph, 1.1)\n", + "    | pynutil.add_weight(money_graph, 1.08)\n", + "    | pynutil.add_weight(word_graph, 100)\n", + "    )\n", + "punct = pynutil.insert(\"tokens { \") + pynutil.add_weight(punct_graph, 1.1) + pynutil.insert(\" }\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1TH08f8O-fWx" + }, + "source": [ + "Keep in mind that building weights in this manner is hardly a rule for grammar development and is instead intended as a means to initialize weights for empirical development. You will find that actual strings will cause unexpected behavior that requires fine-tuning. \n", + "\n", + "For instance, the Classifier for French in NeMo ITN benefits from having varying precedence for some weights, as seen in the following excerpt:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "gKdkyDK3_r46" + }, + "outputs": [], + "source": [ + "class ClassifyFst(GraphFst):\n", + "    \"\"\"\n", + "    Final class that composes all other classification grammars. This class can process an entire sentence that is lower-cased.\n", + "    For deployment, this grammar will be compiled and exported to an OpenFst Finite State Archive (FAR) file. \n", + "    More details on deployment at NeMo/tools/text_processing_deployment.\n", + "\n", + "    Args:\n", + "        cache_dir: path to a dir with .far grammar file.
Set to None to avoid using cache.\n", + " overwrite_cache: set to True to overwrite .far files\n", + " \"\"\"\n", + "\n", + " def __init__(self, cache_dir: str = None, overwrite_cache: bool = False):\n", + " super().__init__(name=\"tokenize_and_classify\", kind=\"classify\")\n", + "\n", + " far_file = None\n", + " if cache_dir is not None and cache_dir != \"None\":\n", + " os.makedirs(cache_dir, exist_ok=True)\n", + " far_file = os.path.join(cache_dir, \"_fr_itn.far\")\n", + " if not overwrite_cache and far_file and os.path.exists(far_file):\n", + " self.fst = pynini.Far(far_file, mode=\"r\")[\"tokenize_and_classify\"]\n", + " logging.info(f\"ClassifyFst.fst was restored from {far_file}.\")\n", + " else:\n", + " logging.info(f\"Creating ClassifyFst grammars.\")\n", + "\n", + " cardinal = CardinalFst()\n", + " cardinal_graph = cardinal.fst\n", + "\n", + " fraction = FractionFst(cardinal)\n", + " fraction_graph = fraction.fst\n", + "\n", + " ordinal = OrdinalFst(cardinal)\n", + " ordinal_graph = ordinal.fst\n", + "\n", + " decimal = DecimalFst(cardinal)\n", + " decimal_graph = decimal.fst\n", + "\n", + " measure_graph = MeasureFst(cardinal=cardinal, decimal=decimal, fraction=fraction).fst\n", + " date_graph = DateFst(cardinal).fst\n", + " word_graph = WordFst().fst\n", + " time_graph = TimeFst().fst\n", + " money_graph = MoneyFst(cardinal, decimal).fst\n", + " whitelist_graph = WhiteListFst().fst\n", + " punct_graph = PunctuationFst().fst\n", + " electronic_graph = ElectronicFst().fst\n", + " telephone_graph = TelephoneFst().fst\n", + "\n", + " classify = (\n", + " pynutil.add_weight(whitelist_graph, 1.01)\n", + " | pynutil.add_weight(time_graph, 1.05)\n", + " | pynutil.add_weight(date_graph, 1.09)\n", + " | pynutil.add_weight(decimal_graph, 1.08)\n", + " | pynutil.add_weight(measure_graph, 1.1)\n", + " | pynutil.add_weight(cardinal_graph, 1.1)\n", + " | pynutil.add_weight(ordinal_graph, 1.1)\n", + " | pynutil.add_weight(fraction_graph, 1.09)\n", + " | pynutil.add_weight(money_graph, 1.07)\n", + " | pynutil.add_weight(telephone_graph, 1.1)\n", + " | pynutil.add_weight(electronic_graph, 1.1)\n", + " | pynutil.add_weight(word_graph, 100)\n", + " )\n", + "\n", + " punct = pynutil.insert(\"tokens { \") + pynutil.add_weight(punct_graph, weight=1.1) + pynutil.insert(\" }\")\n", + " token = pynutil.insert(\"tokens { \") + classify + pynutil.insert(\" }\")\n", + " token_plus_punct = (\n", + " pynini.closure(punct + pynutil.insert(\" \")) + token + pynini.closure(pynutil.insert(\" \") + punct)\n", + " )\n", + "\n", + " graph = token_plus_punct + pynini.closure(delete_extra_space + token_plus_punct)\n", + " graph = delete_space + graph + delete_space\n", + "\n", + " self.fst = graph.optimize()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "qc4B_0rNcQZu" + }, + "source": [ + "## FAR import/export" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0nRRPvy-AYsA" + }, + "source": [ + "While working through these code excerpts, you may have noticed some latency with each instantiation of our WFSTs (notably wherever `CardinalFst` was involved). This is because the `pynini.optimize` that we call with each graph's instantiation is computationally expensive. 
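(You can measure this yourself with IPython's `%time` magic, e.g. `%time CardinalFst()` in a fresh cell.)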
For our ultimate purpose of deployment, it seems a waste of resources to recreate stable graphs for each use.\n", + "\n", + "To address this, NeMo ITN supports WFST caching through use of `pynini.Far`, storing and recovering Classify grammars as FARs (FST archives).\n", + "\n", + "Let us update our `ClassifyFst` to permit passing a cache directory and to allow overwriting (for development):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "5XgWevUzD1AE" + }, + "outputs": [], + "source": [ + "class ClassifyFst(GraphFst):\n", + "    def __init__(self, cache_dir: str = None, overwrite_cache: bool = False):\n", + "        super().__init__(name=\"tokenize_and_classify\", kind=\"classify\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "l28GMR70ESz0" + }, + "source": [ + "For storing our graphs as FARs, we can use `graph_utils.generator_main`, which saves our WFSTs by type for easier management. For arguments it takes a file path and a dict mapping of WFST type to graph:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "AzTkcmAWFLYm" + }, + "outputs": [], + "source": [ + "import os\n", + "\n", + "class ClassifyFst(GraphFst):\n", + "    def __init__(self, cache_dir: str = None, overwrite_cache: bool = False):\n", + "        super().__init__(name=\"tokenize_and_classify\", kind=\"classify\")\n", + "        # Grammar here\n", + "        # ....\n", + "        if cache_dir is not None and cache_dir != \"None\":\n", + "            os.makedirs(cache_dir, exist_ok=True)\n", + "            far_file = os.path.join(cache_dir, \"_fr_itn.far\")\n", + "            generator_main(far_file, {\"tokenize_and_classify\": self.fst})" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Wz8wjCQSD6eJ" + }, + "source": [ + "We pair this with the ability to load from cache (note the `\"tokenize_and_classify\"` key being passed). Note that `far_file` must be resolved before it can be checked:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "FRFYgMmuD_53" + }, + "outputs": [], + "source": [ + "import os\n", + "\n", + "class ClassifyFst(GraphFst):\n", + "    def __init__(self, cache_dir: str = None, overwrite_cache: bool = False):\n", + "        super().__init__(name=\"tokenize_and_classify\", kind=\"classify\")\n", + "        # Resolve the cache path up front so it can be checked safely\n", + "        far_file = None\n", + "        if cache_dir is not None and cache_dir != \"None\":\n", + "            os.makedirs(cache_dir, exist_ok=True)\n", + "            far_file = os.path.join(cache_dir, \"_fr_itn.far\")\n", + "        if not overwrite_cache and far_file and os.path.exists(far_file):\n", + "            self.fst = pynini.Far(far_file, mode=\"r\")[\"tokenize_and_classify\"]\n", + "        else:\n", + "            # Grammar here\n", + "            # ....\n", + "            if far_file:\n", + "                generator_main(far_file, {\"tokenize_and_classify\": self.fst})\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ib9nggZxF38s" + }, + "source": [ + "Producing our `ClassifyFst` as:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "d2BZyx6sGGg2" + }, + "outputs": [], + "source": [ + "class ClassifyFst(GraphFst):\n", + "    def __init__(self, cache_dir: str = None, overwrite_cache: bool = False):\n", + "        super().__init__(name=\"tokenize_and_classify\", kind=\"classify\")\n", + "\n", + "        far_file = None\n", + "        if cache_dir is not None and cache_dir != \"None\":\n", + "            os.makedirs(cache_dir, exist_ok=True)\n", + "            far_file = os.path.join(cache_dir, \"_fr_itn.far\")\n", + "        if not overwrite_cache and far_file and os.path.exists(far_file):\n", + "            self.fst = pynini.Far(far_file, mode=\"r\")[\"tokenize_and_classify\"]\n", + "        else:\n", + "            cardinal = CardinalFst()\n", + "            cardinal_graph = 
cardinal.fst\n", + "\n", + "            ordinal = OrdinalFst(cardinal)\n", + "            ordinal_graph = ordinal.fst\n", + "\n", + "            decimal = DecimalFst(cardinal)\n", + "            decimal_graph = decimal.fst\n", + "\n", + "            word_graph = WordFst().fst\n", + "            time_graph = TimeFst().fst\n", + "            money_graph = MoneyFst(cardinal, decimal).fst\n", + "            whitelist_graph = WhiteListFst().fst\n", + "            punct_graph = PunctuationFst().fst\n", + "\n", + "            classify = (\n", + "                pynutil.add_weight(time_graph, 1.1)\n", + "                | pynutil.add_weight(whitelist_graph, 1.01)\n", + "                | pynutil.add_weight(decimal_graph, 1.09)\n", + "                | pynutil.add_weight(cardinal_graph, 1.1)\n", + "                | pynutil.add_weight(ordinal_graph, 1.09)\n", + "                | pynutil.add_weight(money_graph, 1.08)\n", + "                | pynutil.add_weight(word_graph, 100)\n", + "            )\n", + "\n", + "            punct = pynutil.insert(\"tokens { \") + pynutil.add_weight(punct_graph, weight=1.1) + pynutil.insert(\" }\")\n", + "            token = pynutil.insert(\"tokens { \") + classify + pynutil.insert(\" }\")\n", + "            token_plus_punct = (\n", + "                pynini.closure(punct + pynutil.insert(\" \")) + token + pynini.closure(pynutil.insert(\" \") + punct)\n", + "            )\n", + "\n", + "            graph = token_plus_punct + pynini.closure(delete_extra_space + token_plus_punct)\n", + "            graph = delete_space + graph + delete_space\n", + "\n", + "            self.fst = graph.optimize()\n", + "\n", + "            if far_file:\n", + "                generator_main(far_file, {\"tokenize_and_classify\": self.fst})" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nEhY6wKKtfhn" + }, + "source": [ + "You should find that caching vastly speeds up compile time." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "rTtCnC5w95CI" + }, + "source": [ + "# Verbalize and Verbalize Final " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "H9y5yuk1HaGj" + }, + "source": [ + "Our last step is to create a universal Verbalizer for all classes. This is very similar to development of `ClassifyFst`, except that the Verbalizer breaks its normalization task into two components:\n", + "- `VerbalizeFst`, which removes formatting for each token\n", + "- `VerbalizeFinalFst`, which extends `VerbalizeFst` across all tokens in a string\n", + "\n", + "Why two components when `tokenize_and_classify` was one? Because Sparrowhawk performs all the functionality of `VerbalizeFinalFst`, so its inclusion would break deployment. However, without it, your NeMo grammar would be unable to function on its own. So we separate the two to allow the best of both worlds." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "vUawTJVuH8iR" + }, + "source": [ + "## VerbalizeFst" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xghiBV06IIWU" + }, + "source": [ + "Much like `ClassifyFst`, `VerbalizeFst` instantiates all its subgraphs and then joins them together under a union operation. However, it does not need to employ weighting. Why? Because `ClassifyFst` has assigned each token a specific class.
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "rTtCnC5w95CI"
+   },
+   "source": [
+    "# Verbalize and Verbalize Final "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "H9y5yuk1HaGj"
+   },
+   "source": [
+    "Our last step is to create a universal Verbalizer for all classes. This is very similar to development of `ClassifyFst`, except that the Verbalizer breaks its normalization task into two components:\n",
+    "- `VerbalizeFst`, which removes formatting for each token\n",
+    "- `VerbalizeFinalFst`, which extends `VerbalizeFst` across all tokens in a string\n",
+    "\n",
+    "Why two components when `tokenize_and_classify` was one? Because Sparrowhawk performs all the functionality of `VerbalizeFinalFst`, so its inclusion would break deployment. However, without it, your NeMo grammar would not function on its own. So we separate the two to allow the best of both worlds."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "vUawTJVuH8iR"
+   },
+   "source": [
+    "## VerbalizeFst"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "xghiBV06IIWU"
+   },
+   "source": [
+    "Much like `ClassifyFst`, `VerbalizeFst` instantiates all its subgraphs and then joins them together under a union operation. However, it does not need to employ weighting. Why? Because `ClassifyFst` has already assigned each token a specific class. As each class is unique, there is no possibility that a subgraph will be employed for the wrong token.\n",
+    "\n",
+    "As such, our `VerbalizeFst` is formed by a simple union operation across all previous Verbalizer graphs:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "uMVCqCvsIt2v"
+   },
+   "outputs": [],
+   "source": [
+    "from nemo_text_processing.inverse_text_normalization.fr.verbalizers.cardinal import CardinalFst\n",
+    "from nemo_text_processing.inverse_text_normalization.fr.verbalizers.decimal import DecimalFst\n",
+    "from nemo_text_processing.inverse_text_normalization.fr.verbalizers.money import MoneyFst\n",
+    "from nemo_text_processing.inverse_text_normalization.fr.verbalizers.ordinal import OrdinalFst\n",
+    "from nemo_text_processing.inverse_text_normalization.fr.verbalizers.time import TimeFst\n",
+    "from nemo_text_processing.inverse_text_normalization.fr.verbalizers.whitelist import WhiteListFst\n",
+    "from nemo_text_processing.inverse_text_normalization.fr.verbalizers.word import WordFst\n",
+    "\n",
+    "class VerbalizeFst(GraphFst):\n",
+    "    def __init__(self):\n",
+    "        super().__init__(name=\"verbalize\", kind=\"verbalize\")\n",
+    "        cardinal = CardinalFst()\n",
+    "        cardinal_graph = cardinal.fst\n",
+    "        ordinal_graph = OrdinalFst().fst\n",
+    "        decimal = DecimalFst()\n",
+    "        decimal_graph = decimal.fst\n",
+    "        whitelist_graph = WhiteListFst().fst\n",
+    "        money_graph = MoneyFst(decimal=decimal).fst\n",
+    "        time_graph = TimeFst().fst\n",
+    "        graph = (\n",
+    "            time_graph\n",
+    "            | whitelist_graph\n",
+    "            | money_graph\n",
+    "            | ordinal_graph\n",
+    "            | decimal_graph\n",
+    "            | cardinal_graph\n",
+    "        )\n",
+    "        self.fst = graph"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "Wap-LU6EI2Iu"
+   },
+   "source": [
+    "## Verbalize Final"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "TYaEt_0tI47t"
+   },
+   "source": [
+    "With `VerbalizeFst` complete, we now extend our graph to cover any series of tokens. All this requires is deleting the `tokens` formatting (note that it was absent from our previous graph) and using closure for any series of one or more tokens.\n",
+    "\n",
+    "This provides the following graph:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "L-9lJNE6JPCW"
+   },
+   "outputs": [],
+   "source": [
+    "class VerbalizeFinalFst(GraphFst):\n",
+    "    def __init__(self):\n",
+    "        super().__init__(name=\"verbalize_final\", kind=\"verbalize\")\n",
+    "        verbalize = VerbalizeFst().fst\n",
+    "        word = WordFst().fst\n",
+    "        types = verbalize | word\n",
+    "        graph = (\n",
+    "            pynutil.delete(\"tokens\")\n",
+    "            + delete_space\n",
+    "            + pynutil.delete(\"{\")\n",
+    "            + delete_space\n",
+    "            + types\n",
+    "            + delete_space\n",
+    "            + pynutil.delete(\"}\")\n",
+    "        )\n",
+    "        graph = delete_space + pynini.closure(graph + delete_extra_space) + graph + delete_space\n",
+    "        self.fst = graph"
+   ]
+  },
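+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To see the verbalizer in action, you can compose a tagged string through it. This is only a sketch: the tag string below is hypothetical, and must match what your `ClassifyFst` actually emits for its classes:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "verbalizer = VerbalizeFinalFst()\n",
+    "\n",
+    "# Hypothetical classifier output; adjust to match your grammar's tag format\n",
+    "tagged = 'tokens { cardinal { integer: \"123\" } }'\n",
+    "\n",
+    "# Compose the (escaped) string with the verbalizer and read off the best path\n",
+    "lattice = pynini.escape(tagged) @ verbalizer.fst\n",
+    "print(pynini.shortestpath(lattice).string())"
+   ]
+  },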
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "WwMKFw-QJVgm"
+   },
+   "source": [
+    "Unlike `ClassifyFst`, NeMo ITN does not cache `VerbalizeFst` or `VerbalizeFinalFst`. (While you are welcome to provide such functionality in your own development, keep in mind that the limited complexity of our Verbalizers makes compilation times less significant.)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "7U21AZearZMK"
+   },
+   "source": [
+    "# Deployment "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "VrSccoh9K6JK"
+   },
+   "source": [
+    "Now that we have done all the groundwork, we can finally move to deployment. This final section covers the minor code alterations required to call your language through NeMo ITN and to deploy it through Sparrowhawk. For further information on using NeMo ITN, please see [this tutorial](https://colab.research.google.com/github/NVIDIA/NeMo/blob/stable/tutorials/text_processing/Inverse_(Text)_Normalization.ipynb). "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "0Le2aJvFIAKd"
+   },
+   "source": [
+    "## InverseNormalize"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "r2R3TUCDLi5-"
+   },
+   "source": [
+    "NeMo calls upon the `InverseNormalizer` class for all ITN tasks. Given a string and a language, it will instantiate the `ClassifyFst` and `VerbalizeFinalFst` for the given language. To make your language deployable in the general NeMo ITN system, you must designate the availability of these classes for instantiation. (For more information, see the [source code](https://github.com/NVIDIA/NeMo/blob/main/nemo_text_processing/inverse_text_normalization/inverse_normalize.py).)\n",
+    "\n",
+    "To do so requires only two changes. The first is providing a string to identify your language as an option for `parse_args` ([ISO codes are advised](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes)):"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "tfv4Ee3ML-Fg"
+   },
+   "source": [
+    "```Python\n",
+    "def parse_args():\n",
+    "    parser = ArgumentParser()\n",
+    "    ...\n",
+    "    parser.add_argument(\"--language\", choices=[..., 'MY_LANGUAGE'], type=str)\n",
+    "    ...\n",
+    "```"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "awVl5nAsMUTl"
+   },
+   "source": [
+    "The next is to import your `ClassifyFst` and `VerbalizeFinalFst` in `__init__`:"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "```Python\n",
+    "class InverseNormalizer(Normalizer):\n",
+    "    def __init__(self, lang: str = 'en', cache_dir: str = None, overwrite_cache: bool = False):\n",
+    "\n",
+    "        if lang == 'en':\n",
+    "            from nemo_text_processing.inverse_text_normalization.en.taggers.tokenize_and_classify import ClassifyFst\n",
+    "            from nemo_text_processing.inverse_text_normalization.en.verbalizers.verbalize_final import (\n",
+    "                VerbalizeFinalFst,\n",
+    "            )\n",
+    "        # Other languages\n",
+    "        # ....\n",
+    "        elif lang == 'MY_LANGUAGE':\n",
+    "            from nemo_text_processing.inverse_text_normalization.MY_LANGUAGE.taggers.tokenize_and_classify import ClassifyFst\n",
+    "            from nemo_text_processing.inverse_text_normalization.MY_LANGUAGE.verbalizers.verbalize_final import (\n",
+    "                VerbalizeFinalFst,\n",
+    "            )\n",
+    "```"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "TI1PuejLMxdI"
+   },
+   "source": [
+    "And you're done! NeMo will handle the rest. "
+   ]
+  },
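+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "For instance, a minimal usage sketch (assuming your language was registered under the hypothetical key `MY_LANGUAGE` above, with a `cache/` directory for the FARs):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer\n",
+    "\n",
+    "# Instantiates ClassifyFst/VerbalizeFinalFst for the requested language,\n",
+    "# reusing the FAR cache so repeated runs skip grammar compilation\n",
+    "inverse_normalizer = InverseNormalizer(lang='MY_LANGUAGE', cache_dir='cache', overwrite_cache=False)\n",
+    "print(inverse_normalizer.inverse_normalize(\"trois cent vingt-cinq\", verbose=False))"
+   ]
+  },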
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xrksINQoICfj" + }, + "source": [ + "## Grammar export and Deployment to C++" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "rP9-dmMJSg3h" + }, + "source": [ + "Find information here:\n", + "https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/nlp/text_normalization/wfst/wfst_text_processing_deployment.html" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "TDoVUxCE-Dax" + }, + "source": [ + "# Final Notes" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Fw-9mU7ql8iY" + }, + "source": [ + "Congratulations, you have now constructed an entire ITN system from the ground up! While your experience will vary with each language, you will find several commonalities that will assist you in further development. \n", + "\n", + "If you are interested in working further with your language WFSTs, you may wish to construct a TN system. Broadly, this is accomplished by inverting your previous graphs (`pynini.invert` may assist here) and changing your outputs to avoid indeterminacy (i.e. decide on one canonical output for your grammar for each class). But outside of such grammar specific edits, you repeat many of the steps exhibited here, such as:\n", + "- Use of a two step classifier-verbalizer system\n", + "- Same semiotic classes for tagging\n", + "- Inheritance of `GraphFst`\n", + "\n", + "For Audio-based non-deterministic text normalization please extend your grammars with additional output options for ambiguous options. Every semiotic class has a input flag `deterministic` which is by default set to True. For non-deterministic text normalization add additional grammar for the case `deterministic=False` \n", + "\n", + "We also recommend to look at the source of some of the existing [languages](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/nlp/text_normalization/wfst/wfst_text_normalization.html#language-support), in particular English: https://github.com/NVIDIA/NeMo/tree/main/nemo_text_processing/inverse_text_normalization/en." 
+ ],
+ "metadata": {
+  "colab": {
+   "collapsed_sections": [],
+   "name": "WFST Tutorial.ipynb",
+   "provenance": [],
+   "toc_visible": true
+  },
+  "interpreter": {
+   "hash": "fbc643a332f9d7801191710b24a8a955d342df4f32791f7fb65121dc4784751f"
+  },
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.7"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
diff --git a/tutorials/text_processing/images/audio_based_tn.png b/tutorials/text_processing/images/audio_based_tn.png
new file mode 100644
index 0000000000000000000000000000000000000000..7953c7ee0eb05bfcb5f0e11252bcb16e4e27cf1e
z=G7?f?#V0mP;#ulWCnd=GvtYVB$wNeaaxc_QD<>+AGf(GA{yIDJ#U2@+3UZ(JNVo7u`ywE zABUzjrv@HX=F`MZmvGr52ceA{eWYeFyGYk{hu(>9bM6Y-Y`U*>U!sBL)RRNHAY;zf ziJjSADM#Byip8YaMReOzgxP@lBD?7ib$x)2i?$EcnIr8r__w6JlnKk?7Dtah;<+H8O7mMqBs=UEl z7E2eb9Fnh%yusgL7$Od6hqF2$Im~Nh=vzP7h-Z` z9`r91;iq@}iiDm4XKMyM)D*X-Pl^P}9aF};k&7J5CO9TbSED}8Rc+-cTERI0L+xHN z5{y>MUhNpaqVD}ibQcf%Lb5-P4LhM+L%a^_KR!5ao7!kU3=%+|{Rz(E!ji6-AdGIa zzTZA+<6JvTKbF$kAmHat86Tnuw%P5-gppO{w&9#Z&hzpfZA)-NlQdVGZZl)Y^lTxDGwcDq;g|FD zb9%oU!nRv|$Rumy&TP)Q8z)-5vzE;)l$JOmNG(W6%ALmH_rW2~;d#+(Vd7B`+TJ@S z`kOA3)di%Ks~{Rtm=(&;s<3A-uWs&ivMjMum|?4(IO@Gk=VC*Xj`slb&?A~BK_6)b zZ2uYU*9i$1`FOh199o=0;^lZO$sA0Do$5SX$mj`pl(ILpqI^bo&{^SIR$pexeRgNd z>tu^Dmaay5;1uv+u%e*Y3Fa;qVyC}25N3*Rv4`V2*ewb(ROV>GS`%Bz zbICk?E0WZ0LbQ*#;^ELiwgobtn&6@9)i~0wGa`e@uYJqmO;RQ&|2CGWo{@DDugPIt z@caI?BNPo`hG@O&-g3>Si7EV|MPK$|sRUuD%1St0fH8-gpTAQxePSC8vg?!7pW2Ts zwa%6kU-Ju+)tJjHugT%0$^i1y8@mP!lB(B;-S6@-?;AA~I~dQy8}-Oo7|vfZ5%~3f z$jv<{w9>8n?Md-dLPe+{&zig%K7?%xVzo+)5$CYjU?fe4Zp`H}E8kLgP@Ls)aj9V} z0}5Q_V!_jEvYl!q1IItgBD(AXlQTnY;NWA3T(F&_%BENgTiNrOz+;_F?x;uo6y#)! z)<7h-yHtkN4;Uxl(IB@bMOIJnR&#!!(6cwirjDuZbLaGgz0)FF2u7Hfmpy+F@@0>I z%TuMN7G7|0kLpro3?k=VT-Mz$h$2%fz-YXna7d;a_%*{ZpQ$2ZEk|28N3}bC8K>GP zM>xRmEe2`;J6jX|zIN`?!Dhsztn4uV?j&cTC=h<*uh}g`rzjcI0#CiKpjS%OpdmwM zHTRVwVJYoBLVrP7dQhfC9%H@Q4|w?Jhoq&>!EJY3$4$PHSUdhXCX?(I!XMXul^+%T zp&~-DS-F{G*Eli12+H7y*p-Q7d_MC<->9BICHb)uWA0|Ts$XY+l*HM6^l?)*8W?eO z6r`WBTfm3**moIdVg=U4w7D5Y7o@{ql*e>W1o01j`>FbcZtxv5}SA+fznI?I~xMNyoH9;%Lsg4!2a?*?c<~UaINlq5U9ZRNzrJM{J1f`|l z>0m@M=HGmr_${N%A0_Kb^-;VK=4bRi<#tZ4{{sI}9}tjqr&O z=TowD^UAt6Et{LvyWTs9;VvS2qvP%T1v6_Wma(4bz3m@NmHM_6Oz1<++wLA`Ea2c9 zd7g#6=LH4P5Ef@LF##$!`1UgLwsU{Up>zPN#88^O>>4-%iR^qRWhfFAjU(xt9ZX3B zI~jd$c5odpa1*-~@AugEuQr3_At%q28>rX^g_Uqvn!%7rWIkn$*|GN+Gcq;DXr&Z49KT1kvka#5-m#yjhq)RdqukoJ2}5w+b-o;ZB`Rkc>#&N*!&!odnTu5 zf~8-4gpSEN;WIl@+lzXIa#sz?cK5Oy#T$Q_AWLxyF`zn1s&V++PA7kjm>$hF@c*(9 ztO#q%Sq?siW{wB~VojBfA)&_XdZJvItJk)pq9c)NUB(D4zEPZ=_mAh;A;V@Sy;uys4YW@04 z(;ObwvLIo^YTr@d*@|rR*?f%6J_7k`*Y?`!^`zltg#6w`T@^Nwut^5416)halW%NZ zd2!JyQkZ?wm7VYp!3bmZ;l!5-sFra5A4kO%Hyvc#4(IM+_;mHH!^ORMI2Ix|_bfT+ z$du)*@n^~m_BxR+t}+}`sj$F`Fh%gVPiMjPTag`h3pUPbIjSjC((1Qe|FDLJ>_wN{ zQRn19a8s#~it!y5^%TshZLXn%E?YoPe*18&?pe^C6#0RMowj!vK)g-N$%I*EzJCJ+ zciM`8@FSY&?0Xz^FrDC0p=bQ%>3Dwq8w;_ewYXj$<4at^u5eD0D9(eslEQ>)*G)e( zx$L6ry$*~KrB5ThMuL*vTnNr*u<1(R3$@ugewN+h&tdtu$X|I{J}v=*B5$UE8v5SW zN&)_Vqex6H5Sf=J-zCl9p?{6}Ry>fz_fMhrZ3VEO^=jJp3cLM|6^c3@T)mvPZAzTLlaNA#->N@H#uozI!u@GJ|Kph?29hi1_Id-8Mh?U06^fj@ z$o(8BfN~Qh`IaP8{SX?IUX>J3bLU=-hXh3jSZ1{~(3OjsUQA4m>7v&Nv?lFoim+r?e4vX0|7;>l z>P+DK4%i<wA898uX! zWG~qDrj3lnqVj|(b$#+OS_4HEr0;5kq;CXJ=#DBWocs*8Rxr&mi#u+kO3y}|+LMA# ztXG8H;f(i(-xH!vt>WEqyP9kYPtwFGP`MS+dAKcj93=rOSMbM>;aeTaFQIyyOkZJ` zg)MKwsVfY@x@f}oanw}L*2N+^7qg2OUmhpcLuy8kTeY-7>j^UL>wN%a~Qcsq$xD9`UD}A-MX*x z!uzBU68gAOCKV&X%B8==V^+J@PkQOhSPpACRGRaVM5(m(c(vhipzqajx{@jap5U^4 z{!G<*OiLg-#6gkVgQzx3wOrepbkg)iL1dA{e(rYy@E$jWHqlF!t}iFoiROkoDGiD? 
z$uuoi{yPXgqP-EvSeTM!NJhN+BS~GlRi&X6(>`yW`mxq=P-IFLT~g*N4o6EdY+>FI zKlQ_S0HqU6ZeC9SWg;TLy@S|<*kAeUcV#oRPFG?O6V6~Zp8P9i9Zl|f+Cb62=HB>6 zTqOC$hCLYYLmGMCA)}0o+RfnW496md zKk6jZS&_5@ua|vMy)*W^tMdG(wlEFX0sm$js;4@-1f~bd!CB*PvxnL{>-X2Z3eYQU z5wH(7cuG!9B_ZrDY7L6E{yTO)7kuEZLl!&!tE}YjyMO&YO6cll5p=j#%oE={;5PVT zCb7RQs2jKu1uT`h(;D%*Qq#`B6|+FS`#;lW@T<$W?MaeAyIueg-xid@WM)0r>d@)^ zLAJ{Ioo!V@!=7zj&C8J+Dss=p2utrQV&SE}gPjH<|J+b5L%Y@+qV~iOyN}g%ZJN9a zb84nFcC!Q`eZEAxp`{#_r4Ac$r(KYp!-DX_)FkWt2FY4UAaA~|V1tt{SMF<^3y{K- zIbw;NY?YrEaawpz8i~#0g^qr1*b*yuq6@NSEiL-X^rc<_GFh9X?}^uu(~fhwVw0ui zf@?C%vNgXToYVQ=sieC$Hez4+PM0%Mu9odW4}%aZ%2>h~r=3wZxsVSFp;_Rllb2DU z#2vYc2z>TdZ43HaO8b3u8i8AD$WgzW^!okjy^!N1NoNX_M#zfo)-Z1(p%qs_96sGq ztHk=_O&?8Ougmx6>58ho; zwsron#!Pd=nUrHwT!zjB)c*jj-NaP-Tt|Vs6y8ciKXQXi6Wjkth0Ku;RqX~!Ht&w7 zKU!0|OVT@Sv(Q|x8o0jx9-Y-KH>Nm#xx_-E`=h_Y7wDH5y>Uk~q@b7||0O*@j9N7mM9lwAgZJQoe7nPL|V{Rp|P~%>I9H*rg)Z4}!-uVetN!~)G zlx3@5mJ|&6+Shc@k z68t#ls9r}}(31quK4HD*h0Z%K9hn^dxN=Qxfo%0k)uoZh)mwMNE`zRQI}-Fd`|-hM z<}^1LWvc@o;W~Tb#XfJCRQSltxL_x+r6Kz<{GfP%II==Csnwi@P6P7-zfV zg823_xSIVVw_~#J zjRtOM*+|h&gcXTeZfOJH)y2F8HD)c3K7GP55Rd7)apuYEh`z5t*Ge=ueI5#~Tu<*$ zMn;g`$J)EdQp>UJX}JCg|W`YTvh(@ZZ}3UPZoT}+&QWDDM&J-pdB zW{iLG{8IC~{g50Vw3ci_^uez0gHcaNR#GhSYL*{WSa|#BizAux-kfPY4Qvm(?JJ_! zrQ~^u%U7?zqT7v(&>I*q8(VnIeiD+M z^;AW2n)W>M#XI~_`jIpiu<5U6Yc^%(QkqA^)GbtM++#u0T1Qt`nDe-TZJA|XD;y-`-D$O|Fa{x8c9vOp zqBm&lgIx`!$X4nsG!xFAJ%Qqc2l$Zo5&7{<7Fy0U)(57EHV~0q6&)M4F)aMm(2Qj} zTc2=N^ZxzKfG~%nC=EF4{bus`MI3z_=(+W?^Lf3i11>wIeR|BQPTxKGDUTrxR|;GHugt4HRu$x|SS%dR zcMp4iMHKgssX&MiEiLI{>-`#6Zdn*R0b)>wlBqy^fsZrUGQ;Y7rUar1F!ipX_QHh9 zSZuYSPs`Uo(cC47*;xu^yO%j=QX@h$v|Q}i7Pip-1)0h!Ty;V$^D{PfzkJwfCt{x= z`FBGuWAdg`%KE6>$zE$_YBdO!p|L@BUp|=aB((L8?P(I4RCFXhRv)jjK?2edhgOq& z(!KSTqANexg5@FYdiAhN?R&>BO{|cXLB(ViAQSK43oUaaXW#eTRFa{Hz}~oM`$j zc=ndPZ;gnYJT@`FD8Dzf0{GtaY{+PG$FnAzG|rtRe=U{}8mmlt6^7KB#gYsDg<15O zul63PJ8)^oY>$SHb68*brP}{uzNNTwsW$vtm$R*8U@9qXn7z2}jn)uiY#PpPd&0(N zYbl@m`x+dr)n1Tt%{ZH(f}BMG_1|F#I)%Nr<$5_8TK{L?bHD)bVB0|5KC`SgReCLC zIj1?d1ire;E!bWNF>_G|xIONyv0+BFO zfn|NIUFy4Y9vZNq#r#||13)vn`5U=8ke}hDYYYZC7-2zkw$*;5cMr5$Qn9xttKKZf zF{d*Ep7~i);#4+C<`qmlj>|yt6?6q5?M}8z;is4KrsR0?A-&>G)gLWY*}s3pkMjtg$}ypaWmcZbIdQNZ?V12-RY5%EVrYLlgo0>2j8 zthyT(kP=u$uCQU;qG1Z1@#V}9wYq05t#CjI?-Bb?!NZ9MjhKqL`y2#@>C)`AMA43iJ&-5%XN~^o_A{2s1sRODnUln%g)I#06x83MY%x4uNJN3mt=-1Nd^!;C zLUEH?$_#$#&%H081{3R`7@<-(xAZ;=)BQTDHFXbKc$D3>7>7Lbu_ZVgn`Aj+t)46| zDlNx{0#l@4GQYbA2O4+FZ*RVwhD4q{n?b8kBymb&uGvxYz*D&tPqwzieG!r83$d5( zM^e9Yl3wB9DF?DsP$(a@c?DKPzY0q#3$BU^7@d(x;ck>Qw!mtUd5d@5yHlsh)y-Ik z;>iZG!~La)5F!UYt=iuE@E$Ao#<4qFzwN$w4n)rv)8W83RPN1I+soQ%26KD)Mgpvn z?8ZI0q#CCe&%nW~x4XL?oG7;Yi3v=6Bg4BFU9n7eecmPxs$F7zl5j`#F!o{7>l+_; z(&MS4vw;yb(ex?#tsr|bS&?I(s+I?+R#P#-+l3~g<$apeB40Oq=zLpEu1>Wt)16T~ zAdCF9xRKiXBQG#P*-eEDAmWdu*$Czn{Xgmuo%})C1fVRYmTgy@t+J(_>Gh0=a}YeZ zkJsuu`#R_*n{nxtFAF|xJy0r9#*t_>o8^5x0u$Xotk;`g9P_4$6tLees9mo@|g&`BVK@}E^s^UvsyN357BA7?Q+d_{iF zeA^lk2_f$J=@KE(b7=obWYyxh)z0>%azK|Z&N-heWR?zs*jl@%170X>$GddTjacSY zHs2g54nbgv>SnlDqb+MlOf#6d*~AxVb6n_8galS~Tle#DjDK2RR&cDfJowk`J5NZs zU~7|Ak2LI&ccOJfY_P5~-waFv{6*2FiDW1Ui2h^OwHW9NHmJnH#a$2Qi#_G{9ILFn zy>RW#^kEbv34j0(_JwPUuXtzI;J7`D&F)sFG@yWt3B9>RPGZCRTq=luT{LsoUJu8$ ziG(xJ#$G;)dfa`!tk_eR$4yeT^O(JTJivEz{i5#D>o4@W+km1!$O2hA!YYV+e9|9} zLLy0b2t5myx?C&=3ZR;=&b1u)E~_a@VN-C4?A~}pOX5i3oC~3TrXB~Z(SyEz>F9kF zi@w>Lxk>hS>sORYt$8*THFRtAZ;kJQD9FCWb`VpZqp!RA&cirFKoz%F8j3fCmdl&L zmCFhJPm^6ZsH*#e33zZz$6z6&??uyA#I+f-$liUS!iPf7s^$$jGqM-qEL|kMd0kB>Se1j*VI$H6Bfk!s#6cP^8sBnaSLtB?2m0~dK*5D= zqPgMO{$Kuzv$HI*7GYNyaGGDTcus6Q+3NOTSF3t|1%(S-11vkA1ILtj(Bsshy{r7o 
z4G@TjJF}o}_PNdS(*rue?*Im4B7dU!FAYrZecib>r5tTR#y?@FI)p+kLuxQ|<7OGl)`@Ak3p-)~MrY3sFZ#f&DE` zW+54dWmy~Ax)*PKYDanY>Ht04{>uV;ito6QLdQE zTd{m3WZO_*?v`n>I_cN;Ad^FD!@QPv`m5Sq@=M7s7MdKxFI2pIx^ID-6G7mdK9VA!1j1?V244~1R>Sy;^Ghr z@fd?!69<^~Q$K#n0;#M$EVX*Pl>PlvB1vYCZ;_XZ?PDeN-tM874t=?G!{$@)BvD|= z7_LaQ1p>v-mLPY;TI5g8t`y6VKOeOjGNupU1 z8>`4wNmef~-0hdUyI4x6M-?`5s;XTz8(4ib6~-!bWqsT@85ZOn92nKH)#u=(UNOL3 zF|;;3$FJ!VVdjQvj-w2faZht<)|tJ7>oFJ`Tib!j@SkAuj%C;TB!5Tr>N6CiF)R>N z+`=-)k(p@UK~JNT;lCz#(-gb)Ec%2!{VXSasM+9G+<9=6ElUkTM4xenpO@91f)|gXRdy`e81#CxmQ5gq#oqem=o*O4? z;8d}p7OgmaEx(YV)lCBF379N)vm!i^x zUv+O9h5fl!E|*#h(xrkUdrH$*%dxm}_Y5 z{YZS{%~)1Xd);cDH_@|cu3N`)adn4UoJ9I)ryC3%4;8+h+29+CdOqglrtbmKINQ+n zr*SJuQ^|-k_~F}=&E+`?p*e@L&ES^gjHe@7a<7)1Os0wX+=_ch4XbpJy#pFm1I`k? z{c~v)o`GJbysw!2XAS=c+eh);k01Q&vMgt*!cCSg{hoKn z`*S3jpA0SX_2XwV8Nby7(m$WnBy6S`HZGQs0@|{v5u@IhW^g$p;|72NT*k@A zA@t_4GnyEXxmxRd2;gm)zBQRQ53n@3-P~^5QB4sMqJFkg5uaJ^n zpUAHcKbjlWBVK;fqLbKc;7NV8{M4$BfCh(*!gvXl>u3OR(61;wL9Ep4$}c(5%60)np5Ct#in=-k%i z&(p;*d}(xCVtseaSG@>-b3D+aL0gD;@}EfUS8@ghsM9MV<3+(*IpZS+LwRxf5QxTn zM%Cmd)!75I|8&%1K0p_vU-;TiA z&mV=YB|BPEPz}HxHz1KVD>d}!7APruFT`ym3Uj&PbhE`$H|=E4VK#L*YWd(fczKa2 zVjI!meD{q9w%<7nNiSf^K3m^BrSN+rV&BJZY{Zpw{myVvXMIj6pfk<&OlRoQ>~_FJ zzZ@E1$9}z)I~%;5gS!fI^|Tz6tVnV-M5)~HP6#&y>}?>+jA=4RSo#L zo3PXAVrVJ1wu(pKGsxLnSAw@X0wUp&ESk;5H&X+52_-KMx`{Ccoa4&eSe#H zu1y(fNZ7DD%F9o56usOQCsz^s$E8cbV=jZ)23=5C?IM#SogF1T{{%B5`bA+<%<`PL zZ(`ZE#@;*CvE<2yfst@G@qP^a!FPtRTV#spxPpGtUR!*T2)B1Y+gI>h$BVU(0^P*Y zkNkVZ;)S(1ZlL|-94X@$(VEDfeY#{7V8)kwo+v!thUTvUwP?3^))!wc;Q<`HKN7lr zk;UR5Z}0DkRaHey?4Z9Z4WGT!kQxszwa}693(k173{QkTDvXILnif= zA+g{~;btT%=twUhT}B#>%@$Wa75;8q?^f(h=t9&{;L^}gNl;ZVuR_>E8RpmxhvAQL zo1#!rHiU6gq?pcu6=&A1lk9eTddodWwug5+~vigoh18eY&P z>}Xv^DcEA(q>$`SVgnS`=3Gq?{nkf!s%w%#$l$d9?#^+@)AL_VJasnT z@S}r|g0>2vRXF6f|EU%2t<`VLlc2Y>zV~J~xX`CUmC%{vey&8~( zgC9A`r9LV)mdVSJlMqGraC)(ruPIm+jpgNO)BifV0u)*$&c+>2 z8J1#)ona&Y=$iKFB-mqJ8yrTd*G@DYsHqXX==P2aeLYcTeW*RTC=_ZcNCE1?*w%&7 z0}Set?`^*;@71g*j}2$ZSRKU~CWzt)!!R-YkuZ{Vc?G zL75jWcaeTmy1`nhce-cyP&3H#12coHcUiv-s{`jnH+g^d^7KHX==$sR=Efw&f>7w8 zX!E7giYmnOmLA9i8HmZ65QK%Xm!aG}8xv{2*5&nkH^^LI->WJk9skwU_+4BcbX9~t z1Fq`DsPVT(TdHEVv1$ywUhk4S%}jm&v1mtoTs) zhSJ3IS|mEA82=@yt@wm6-=>(3TlLxH1LQv&Inr5da*=A3UYbj>8&-b}$pxO1;Psh0tVXZ% zM$$}M@6Jrk`FxB)kU+f!89ly7c``wH*=T`&rZ9X5?6D}Ee6k_r$GRGtsj4-9{dDpe z{O{QMCN%I)F>46pC8Nm4J^h^xJ3qf0*C&Nmk2ZWh3*1B;Y22grK%3~^oU-rTLvIGW zvC+iVQ^VFWGe3)yMZ}`d5_9V%5}{77lbSa@QkR2xJBn5Ibw%nyibTo4;+-jl(NU$i zR(6lw)Mq)(9cQ6&xj@YxQ9tdu4W+q1!-p^SU#-e{Y_&Z}0Y{-SIDP;6|ih ze;qPNDs5s8x!?19bAA6^Zl=RXKT_Ura=6O9YPW-!3qv#7e_Y=2S67%y6ky}q-rAPm z2@xLQ8pQKR4n>xur1orcU>|%Fk1#*i%B^TUJd? z)r`0De0Bh<;b_=@HOjiC>9|=cv*-CYJ0F@aPAMe#HD(#0Z#7j?g29+4fcJqnstta* z^GOz7c0Ie~lk|k|v!{tIBI`Fd+txIPA57&06b9?A>2~-2=IW}#Pk#j8zJ~bX*^{<6 zFF()Ya+Ts4Xy{--*?H?wYvju!yHHs9)wO5Qgx3DSgMcReFfwBhZTHFIQI=yy{`^F= zY#aQdD_qK&t^8MB?q&1IyQHC|!6Ta(e}njA|H9r`XX`x?vEhFG*b><~oMCQ`XbNFp zd;fAlXF}H4aU0!o^Lk=LB3#@=I>F$J`lCnp`G*eJG1_(eQ}TR%>vYir`POw&zwK*t zNK>1l(UEvJ{IjQ(8Idxk%k!utsA8R$^gQO%;Qn)ir>8o+tfAiK-2vg0O|lYf)q-T9 z&)FJiZ>cw)wQ=fFSL-od6S3r+!0l~!vf>-zMS5^}D7V_`_?B2l**@zsON{F4A#@1X zsrT7eb*H#z%)*{mw{#q6XkqM^M&_h8#@+TgX}AktC9>!De|hrs<{}kz!hVIRvE{Z{ z`xHm`K=WSK-hXyUbLbEUzy zk|`RVLQq(rF5VloN4c3t_KtXT(x%Y__RdL(vTBGHzJzqXYl$F0F>>Zdx|NPg{ zw*jU98R$t1*^rjDYvh~vA|=%f(iLe&PkG`^wZyk-!QhO+ioMz;h+Bl~d-t5+s;3`? 
[... base85-encoded GIT binary patch data omitted ...]

literal 0
HcmV?d00001

diff --git a/tutorials/text_processing/images/task_overview.png b/tutorials/text_processing/images/task_overview.png
new file mode 100644
index 0000000000000000000000000000000000000000..98aefe0364fa333fdd279f08d6b1346d1e7cf6e3
GIT binary patch
literal 49787

[... base85-encoded PNG data (49787 bytes decoded) omitted ...]
z98O~wWD?^(wmU&=Zcci_;?%OZw!|2{^3F%dfMw`_v(#`p8 zLr+gnL_BW4>+9=<^;*m(KOrF@0i)%4H7Q2K=jqnNo1v^MqHLRi(=^BK#>T?+NK9EQ z=vtWaWO}132k*fMg#X+o(Fwl<13^!Yo80DZE=Y*(OCapYXUq5M5wuv!odvi1NG_9v zr+NI#k^F}gM96E^F~}q*+!72ppR+|P^}dP8UbT~0WMNDj3U6ZEcRyVPJxrpe+3}1d zb>M4&G)G`o=fq_TgZxkZ8~ttgoP2`$4^3Z^o`k`az%w?oaoX*K*bL-_3CzO{wHHR_ z(g-)s3FjYrv;!B~jv`@^T;yeOLdg;jQ zz;U{^%-bow!ktF_#eo$pi?e|j(7RHv;VK6$C$zQXb@o_JXn#6s(LQl6eVoK8&Y=4^ z?R}TBhDCIz)Y2Gmn4qf2@WpT}_jZYJ;qp*a!Zf`1zM`j$Wp*jtzsuIzXt?Bvvgrhk z$X^svXEvU*nNd>Y<;Y9EXImlLY=hzs1uOc@gf*1;O4BQ)P@HHG7Ore{EzJq8)DRZ6 zv+^^AjTa^jXXG8uRn&Mrq#RqgrMRxTr?@_!*sAFve&1+2YqQaDnH?`L?nkUZOzet2 zv)P?{X-hldUfWLVG`4zKeaeNI-@9}JFYVE7Nw40Xw(edF&A1%4H>4txXb#WbP|rYh_MG!+{{9k-yWT%3;Fmw_hr(_u9QjzOhtJ zRZbL^CX(Zqk&ggOo;Dyl;Po3m+Dt#^ZSlGHx^%oecnbyctJ$fAiPjK@iRtKW>=_nv z4ek`?;m7GHan;H&J6b6V2kJA|wsF5f5I<>*?gPEh#eXsXkqcx#N z$3%5Mmp}b-{Z#_qg-dPZaD{99ElC9n_Sj8EjP+%+>tmv1dZy zsH(_yrjf9nd9^@2o6jD7J-~&ubJt{#Q@)Y6*E-|0Q*&Q+TtyQ8j=gM>do?^O$64vm zWs%#hge=->?hyui_y~4f%M~l`Q9WBu4lyrP1sk_JbrQESdran_aElF;3|H zpO-CUDQmpv)+eyDXT$wFEL**Y3KG-Z1s}3;^$sue@=U7@YMiJ2Lz1G#=fE#GEy=&< z7*I4cH5VFeNqq&@{f<&)C?w-)vnndk6NYx4EG~No1DiNUx`{kF!UY`l_A{TUKt^`X3^5 z3XuSi;mBo2kg-XIhYV}SWtV&^_qPS9aExE@oEx0q7)W|{oRfwxYbS9}w?;8-fdpTB zRX1^JY5IYw14x)$Pie;h3VgQX7rT#`AUxaw;t~M(Y_^1}!3kSW{gcjVK#LfxezF%A z;x7~cES4=qEa{a1NC`4d%}OXve~#Pj#M8&7atLs^;qh~{;BtBFO3HjAMMAXLWe^Pm zcgez4*jFUgSc!el*4IbsFT&(?bU8H~{eE#)k2{h)kjkGK>?&-%YaStV^eVqI_+||; zHoveA^*2{-&Fu2?v%7uY*%0Kz!I0DvItI{;AE7!B-zrHrtmv=W8o?i5Uz919Vq7-Nz+RjW?T6%(Ux^V06uPP=81!r~Ls2)Qzhm%APs z6@eU8TTbp~V`;hd1YPStYOO4ox;a@25zWcX&6QJA3m!GeDJcnCU0ubX(^Lj8nfP_e zuRWo5!yr@I2MEv#*cP&52om76cpX}wri0iw-0->?8eJD#0X!pKPacyVE%TXV16tND z1_ldP9?SFG=6C~XsSro{^%M?mVR9(gKw8SPfRL-x?RO)NY;CL=v01M{)$in3iF`6h z>V-v9V6TC=lBRCiwC}VL7t^2&X&U;rPOoH81)Q>?xU|II=}Heqg!30#37SAnn|_{_ z8cg!DNQ_}DZjzAHMcz{E1AxCu7u?{2WXk6aH+{#dzU(O=>=>4d7RdVmhaR2)4#yrA0RWV8E~v}O7}BIQJyMab|2 zlYmsXJpcgpXur!=1_216;HiM~gkM@X+z!~Ah?6tA6iBhzY3>U9x$7*e{*%`RUkA!~ zKX)Ida!!b~gTeZ1SDBWWbUAdz>3p5JLzj;|arP(tKUB?4o-;-ttjB1*yT4m~Q zmaTaqXR~qbZmM zsZPFkU?e{~v-X@APt;nGV!?cat?s64#(4k1bOHS$X;WTaz{rRciRbyZe3`VNU--=X zCntG8NKq-~YuP=7g-gykqjcke$B+#5XK2#NEPfUef8=Z&!02UM~5!SEdWd;{r<9`EC-lgqORxftpC0A$Aey1#?r7tJZY6&?aW zIpMSs0hkxQ=;DMp$`|1k#RtS}OnMBk9Q&$XOwCVzmkJ}N_6;Iwx7l}sZvw8q`}61Q zXDb!L(BjYcCQGzzv}yn=`%)kyI?bwZyo;d(9mctlDG*kq#{{98@=0kMjKH z;^sg``Cx+3b@J?Dph3%6wKV}TY*@s6D)Q)V1^S|;gA1s5`v4%^wTA014RxI-O1(<` zjDG$cu%J5h8{shXBMrphg6)1%_r*0W01~n~Rh7-uE*MgX9IM#&lheBXtY` z5~B+d8e}URs)ucvUcpRJVb5U)>%>-hFgVJR2GuF61=k#e7-Nl&b{^EOj|}O%Y%t9} zHj>OO@V;3prc62NF{siLPaZ8+J)FNX0*HudOw2}4Qk2j)bMosnGQ9oCQQDLg)|S!? 
zZ6kH^qe9MN0bS&4{*v65+#d@uqer_wI|fG=Q|E%5!$^C~+!V;BftaQ$Xb@73-u#vB zCs1oMyIegbVnMH8@8;*dQ}S3|`MCYOl($e=BNmsEHahxpChS`VVQ19UrUKcHp^-JfozwKCK=%p@jmqX%dtjyofJ zk3f_yOJ0-ss7;~c7zJ{{VTx6m>x8Ww#P?*gs=ZsElCjmp^yi21T4A=k8F!%#fp@iu zo0K}|h)-tf5W9vb!!uC%BY!TEoHYyKN&*``wZJ{YeQ6G;JesF-tYY zrk{Q-M2@ZaGS#~{@;6GTbe%P`=ciwWNC`kAGD8j8MtE~g+Vu6Mo{sAAzi1sULn%ER zm{Cp7mMoZ4NC;MS!_tehb^};rvC-Q|Qq@Pk(wa`MhK&~HtuMCuDx;&Wu>r$c3d$mx zi}SW0JP_bq6aY`}&<>7&96tC=(-3@_!GbDVxL;mSJ>z+zMk^EF%Ea>pdLuFPeu+Iy znH#a9F@GyGVRw$2%E#<^eakUAjydIyhRt#NxW9m{=AhJ`?yd;Ag>L4_2=(vyLE?%x z`t1T9iaczwk4WeYo>3sICqP`?WFr+XmRmuZ8k7}o_a(>LX*ieaX5k8O-{| z-P7>9j2#A@cM}MQIz>ic)xfz7R@b*B-u4$E!{zR!84fKTC$4d^ybMdJ>!W{zGS!AJ8iIDGD;UyEvSZ5uEFvV~0txF9VJPYkuwaxtemC=+Av9Uz-qnmkgBHrnRj6Qi^ zTE+9ovwHq&vUxZ+BPJ8Q)!TW`waHhR9DX~M zl{4G*%w#qei-CJMr1#*nWuGTf=qMQPNu-i%|}Bk^yN1q3j(rnf~OiChrb!cf;!N%|S-;@>3Dsf{pzEhM!nj&`>} zdMX3=hTf~@K0z;zw=V#<02I8pJ+Ocn)taSap=n_bsm(ev5jl^GuP$jCdPqB~+3}~V zy^2_cJ^(@|ZCYzAlxxj!?Z69Qx@W1NpmaG|1I@Ja)DQ1a6R}4o0e;qd>}!qJvI9ak zjmAs?2Z~()7unwjgi-^IUx420QUdVz)cv#*v?g&`iGG`^95V?6cL9Sx_H<(aDRB}K zlgBPqp#d~55yg&<<+e!}0Fg96LbkEese{~&1DQ4n zoU9}qeVo9_^e`pm0s|4zNO9)PEe6|Hmmh}){lm&ca*mYVPOO$~n&0H>6Lzf$DPML? zzCPq_x|b~2w*(}_Cl42U*^{B#P`HOlQu(yRK1$J#a@HkkISIym6UHO|VjHT^n__=o=#0eY0c&+oR_ zd3(wljrA1;mkHs*CRp;%qhKYT-~LcN;y4`t5b8QI4ZG1jPbHR0DbvDdYvPhK7r9#}I&N?EZ*9bK+Dc`# z27`)qa!mnn#3Vd)&ZX(gJ%Mo|v=0Y6tJB0-FA(u2Zyi54J9hS|I7PwUX~Km^NJ60} zM@l>}=DJ!VflqrPG{oUEKB>~Z!>ElQGGU&t#R;II7kkd(bON+r#JU=i*D0txkPh;Oaf}-wLKJf@--GwI< z@}jwi4XZzGYYd$^u7er^og+*0%g>JEC|O>N@I~^D%u|)VIoAYzKYLY78n9@ZAb)k8 zESVwq=S^N-=+K^pS=D%f(GGzMe|%(4O-OQqgnEOQ-OKP-eJ^b|Lef%kEZ%>60ov>= zjp%h{Pg1T+>3GhXO8Ckfi+r+!vny<~scQV_Jeo%Ka&}<2y_F=!7Aif^F z2LOtct-hExdGCRHsrL?DaXb5M+fKPD7_PSzR2G&^%DR5(w!io%{f;h0y{_@+kn3oF z!XE=deO-Q63k$*f&U-#}&D^8>};?DsoGkop0RC7TVIY1>nL>(~8Em zYU7C?T+T-}H*8&eX+GR@jmY-IqsD_m$Rh%O3<*nK_Jm5G&3S_m`aoKn zPrKb#OOqYYIa`wz{zuH_Rqx1Gk!RTr#OC{+tRN4MgVWPKn+9g*L#53N4)1m!VJI_5dNf%NIo7%M`<4@0c6YU|$`}#uyM2wRH%!@i%=-x3Y&&}8eZ53>x}>%dmk10m&JIC@$0;lV2to-x8R ztMz9Z&bniMnWqXLk&{y|BCb^#=nqnbK1S~@rTmf9!8Avl%I%kDAF4F=#);Xo(&rog zs)o9>^a=9#B(?~u7*hw+)s^L!2?>&iFpr8$l&8_rt!v0BA}ORJ;Zj^yk{3F3{GN@? 
zsp{f*`1nt$=QRusy_y1YO1rYSKYIeWLbb-_VqGljm`CEdJh?amHvY+g7kfOxZu{=B z^MS*D{uPt@%`#d&4^K<2DamM-nLN34^O5A4mm>e23r}&~l=G?qo zn$C>((KNntw@S;xdo%mFPD4vqb94dqd+ZA943%+9?c;L1JgyhkK|~LCOLtVr)_X5P z$9NTmr<|@0J^O|B3dX36&Vya#ooFiYopgdV(h+lSxZr5rJ1gA)huiC!PLvV zu_pe07k0>puSKNQR;k)1C2Ry6tmM<~W+xR;>TOTw%sn@8{s=lGAw`MgvljNpeu@y; zDScTT+CV>xj8#n47QR51E`2#Qk${#GKNHlfd|NfNRBLtHkG7I%QbuN0K zh8WGk(Q)9uwr)p7uAw>BihH6d$ygTkD@5XEdW?FOM?Gj*XQZ!869G;Y263UrVX&>s zXmDdB>v+kMN}a*2Up%1n4#b#pGdo?`im_8Q)$@Fcw~R}KOg(>jzc4;%5qj(%+q#== z4qzTfcXe{)~OU_^6R60|aeBRuM1G$2y zerrp3ed0mFLsqT6SkSOgovk{!vzUFX@8Y52aMSA05jK{-RzG`d zSu~(2ddhxeUQ|)Ab9On{%@bjS*1AiK#xy;?0^_b*l6iag#;xb)nBKO%_pa*K_?P<_^D88Tc4 zHzh-1fVJJV)i^y?KBY*j1^-8%uF7QTHe^JxWRUdH@so!>)lr5;Cex$?Rl=s&7-^k< zOOWlsE{j1d5&yzB&3=56M+H7AszF2ERL!wv+qRCE`?HEmf7eF_yWl2V6?y0;$F8?K zzOheLbbtKXr;H~Qf3kk925H%i({yh@k7<1(0X^^%aWBY#l4F*Wrf;Va+Xpu3(KGUh zhepZ~yBPp6$`~*Z=H}%A4w+D<%b3Ilp?#FOk8ZQ5X@Ibo(lA8se-A^P z<~smG!9xM81%SK^lo*y{ywI&z>xIut@l8lgwCD6I>q3f4OnPfb`Yo!y_>DH&s5ENO zsQ{&Mpuo4`mMA6m1(0UEU5{O9Q=5KsGo7vTM?O(Vr%9)xrHV4M0G_g3@&fz0p5{b8 z8!kY*#0*9_7j5vf zN*4euT*p=`;=SC;9O)M6brlP0q7DtP>1z3KpSQJdJiQ5(?0E`5nFFc_^ zMF)zLsJxSVTtw(O_{j_Hi_;R-tVw{011OwCxqZgCX?R#nO6J2Jop{qL^xE*-{U@*7 zc?Ce>iY^-jNYxh6*89%tUW0G)*xALL@&-!b(ebH)qZVYn@m&H&Fu)3nNnt-)%7r+> zx74IAZrv$Z&>kaR6a!EkfcF8TQk%EjtD%BV6}fCHDW+5V=UX6*a{TLv z=wJ7A7eDL_zmT>+>q1jV`|93E_#g(YdW0qWtPP>N+U)GmMM5r12-O@|W7L;>*xD}% zSGnyHRdp-&+5#X{Z7+P6XKhDAxV7!c#ZO2^338xsai(9$BjbKQ44G)ut4y9ROD3T2 z?P4qZMwPZfKjm9TqIn);EC|k4pO{$`Q6*)tyru5<62USObessIc+)*7%7$H`n$hq9 zF)2x102iYI@qOJe8dAIbx94$c^HoKW(JU}eOyTaeLc23!gxDeFpL8HSLk7mje?IB7 zc%|2n4g_zH3`xJ1Pl{BMCwBUwAPb(wsoayl509qN+6*8u zB}LDF1i$E5l!1_@!6LcCh%?q^3_bEFM(QGJaT<<$xk#i^H1O3Dn!&3eU;}yKfL2B#wf* zmGgX)t-sn~DO{fad8K2bj{zUSMR3D1fmhmiJXn0(_385Xng!}UQoJ7JNsyu=+8Ziy zsOu>Ipuu~T5RZ0-9%NC-r_poBUVf(aHYp5gpl)vc)N$+2z;#YFdgKt}Cbdpjju%Qx zYKa@Vozd=Z?%K0ONYQdV#xc^hKc>c?2oL>@bDS~H?L1_ic0sL?aov)fYa@#G<$KMx zmVFWuTH1AF$lkB(N*iJ-?#ZAU_ zgt|Kfe(-orKY69RYl0@+V8DmU}q> zW%xd}zLg%C6werbzjgh$5dPQjA;zfr-KfA6qN;mKvn!iEemt^Z#KCmi_+Z$_**7^q zIWW1J^vAW)tySP|FFMF+M3N7z#6 zv-e!*h*6|4teH$oF3b>gtUO48y@s1KV@#-&Gg-AglH2byMheBS3;S8_S@!Q-N%&F+9eKZ$Gpnb@f@vU}^ zm$3HxF5StQ$)40w&SUPH$b+r?X`uKI6fpi;Zn%f`D16dmR%0ITb97_kQ~R0=)^^5C zUhu_0k(Hlp;mguh_!90ot5em5XTE0#)%6+#4%?6N{L!yc)Rb=hc^UL!P$HjN3!im#C*8-Hkz?#z@{5Ea4CMaQfe0hN^wQIIo?$H$$>?WzNxhRUsQgxH`_72Kh8i$khhI|WSX zpu@t|_Vh>kSB(o*Wl`B;#eqvPg>*!>YOg%l{_LqDEHMuzAO8^2cKTDn*p#Xy@Jw6C z&(hcipM&R5LeP7%xH$5PetlpkJA-u^GBONpu*+OPM*fQH$UO$)MNxL9tYt}g(sC@= z)2dW<{9^pdrirU*oglxF>wy)t;fiT@(Y+FfiaPB__62-|4<}2%~uoY z@Z9BR$=AtTd@N*t`tB^8_^3r;2zeCgy6MD*nuI;_HJxUOJw5GbS0BuBYgU%W!g-!5 z+|mLURWxJSG;D!-R>vOXdB=ey@6}6+okZoH&?O!!&gd9_)yKEAG12|Q11nAVb2kD5Q8*{*gr2#w!$cagR4_(t}L$jW!?I1c+ej>Z|?-c$O>^F@?N z&+y2jl*N^LwETRec>?)2eri}U<<_2i8`%B2TmKmXwXK1+`Wxi}s4kT|SnHeduBpb5 zltf#5K=fH{jq4s;{gW7R`@`ARZBC-{#8G?Svk?7d-!M%g=d58XR#*VN&jm@_W3NXa z=yT3yT0Y#dS3^?i1%7Vs=ZlU6B)4LjQjaoxgIV+?ymwpFtKxmVvrZhT!18) z%{?z2%1bV9ZGrO4Ui<6#%1VQ`Y+HILhka)F=giyrG8f=G)+psBkbeN{PdDI+dkcLR zk|N!~D%6d8t>5WqVpjMKTeim^z<~L^!Nu}`Z|4hjco`0j{b=flZ6P942h|#tx8pXu zluViwP{;*RDrz|bfPPVg5#H;Y`Q&CSlGV*nfSEfvKQbb~0j2)CO;WF@5iCe#2yiZt zY?_2a{NV`av`*{UnoD(CVHtC$#R;Zl z1l_m%B$I!>q>=2i(c|SV^gxU+^@$?4{c3t5^P&fTg`}cmGBv;Nu{1yVc0@zae3E=q z??DTPE)jIMe)nGC)IGI7%%|`9#2ZB5a*p_dB+*aleuLmg#JkE?JWO|K-e<(sxPbtT6~2p)f&vGT94UBTn)B?3SUjkhtccs5FS&YiaSk zAI^WQkSto(78)i5v+$^XuYy~)RhqPOg(nmvYiykatvD44n2cfjfL|0YN$DtF0wj_f zR24~SvskpV^euUlbSsR##PB7(X9cyT;f~sses#C{H|>6s@Nsvc-eQE3f=72NIEtF| zP?6?|{dEJH1HYle)BKn_GA?Y=HZL)SgelLk*R3BajsUsp3oehhtTPZBkSLv%q+As2pQ)8PFc>2*=0di7 
zk*IO3@-OvxS#28&Z(Y7pqAIm#$=zJm`J`;$H%5CUPI8KBjaps=62S$94|#NCF54nPx5AJF~vI1P19GXlObs1in$%}M?x}OjhF$3Bu&7IJs9V6?%3$;cj3uG~eo8KnIvD67 zg#a#aq#xXUYk)Vzq5qb<(Qz|PFL4QA>*2NAECBTfZsdI=bAE4P9A_AS(7NY*S4=1d z-~B-gReHQ9G)PiR0d;i6uR-Hrp5LVW@J z5BYuX!8_@}LEIFpNrjbYfqt*EUxB(?-9DuY15S;t-Oqj(!T2*r1SGwNH!Cu1vb3L< zMqLPi1l47c`24-yy*W4tt9b$8^ZQ7>n)$z2@^4<`bY_Z~{sL$TkT0V`U?n66q)NSw zRdZ-`RCH(RTgQRHfsc5o3cs~Gn$ylMaov47eaqqg>KDLEV|HIS8BzQJ3@k;;o;K^N za+>N!ar&FEn}O7lgXBwZydT)2*QN?xb-BMA@cXZPtI2QyP$*DjWj>TjeQZqZS9x4e zZp*fju{~J`>L=8@?2)2XA%yIWZPQ4$k8Fn+K13Cm?y=o0p6>PLIo?gjl0WM0u{~zB zT0LlU)3je4?8WD_^4=?1^Fwc*p>SC)Jlc8R`?cjwQV%@-yQmHq|SYNZ%Vt z9-AZ8267Whz$YTk-A^=@L`4x;e+4G!OQa`&3Z*@CUJ&HWKuw98xEcdbN7Ti-+DZG6 z<@0T;_{Z(d8d}q@6`7tO)D&*ZBB0K*iA_>$86o#Lx@dVu!-*5~R&W z%DNT{0Ai#W&d~9}0qAtO{0c?OlB)cb8FqRAaGNTU2Q8h@Tk9PTNy~EkgCW#apN}3Q zRK2ouitPn~i(^PC4a$mQRA6+hO*Qp_{DR~{A3$v+%VV*!`~aQ2-kY6SM1*vV#Le8O zC^F&bCe-iMToU${zR19ZSIkaao|q1-XbZI(lGlaVuDaQ8Kb%jfW>j3@*=qv?(hY^E z4Ep0^1dmF)_s?qAqr&1Ms??b-09#k3GK2N>D6V z)Xxv3#EOYJAgJlh-m3p4G625F#;AFz1av>_(yV0J&;Vh{pN%B!aB8$b^6BI5170IB z*zt=4Z2MBSjS2i|PX&*g9rWMp$(apP?^w^r1SrBBa3<#$2H==UGr8wSc^9C7O%`xM z8n+no1WY4y{{LnPfC2(969CSKZIj*0D>zvI9QnSO-Jw6b&5};z9+H*!-}QCbzLO`D z8cqxZ8yK(Vx&x?%%ad42BBf`2%mO82SBBp(OoCPeLE+gb8;yPrb$m_rEK;i?}0KED+N}M zK>u+9fJfhFSxbfK;~}hc+F0Qb0$Hv`LPPo4=jN1`Z|HvP;pKsA)Oqydpp?w#jaZmh zUEd-|1YX0NTmDuVI8-rcuAA@1nX{o;nw7Y=39EO`Yh8-7XqNOiJ3(EAR=~U1(yVIE zH7esY4RyjT%^MKnR>=v!`Q2f}6tqU#jqK6MZ0 zHmG4J)6Y2%(=lTb7AiZm_)JPTw9^yka;&GeGqaJJHnt%kqc-c?aWIrDcf(|qn zqo`8HN#xP+Uk(}J1%|@)hV~j34^#}ufYIrR%2B!5_sMejvvtp&>uk!~)JLsHhg>1X z1iSdU78tU9*WO1D4zywczFnA5Z89&?Bl=^0e!{CzNSdyH{Dh{q}3#e9Wc zCyLu6&xSVka&K%pJnyK^MmI%AyN5LIZTq{% z9k3~{OoDp(GT)7A$B$@1X##c$(pKa0d?}6>0V)NRud+*d@g`|5`GuU*A^0lpwxnIb zHWP+E?b`AcDp2n&;eAh;RV%vZ`OjsKyDrvl4rMngC*Z>sNUi5LS)Yuwg70b=nie*v zq8SIAV6zSD={hSze>S{}qA&mU0z6vzCvRGUkO{6`T z3vUE}7IC)o=|8=NRH<5$>Xzl3sfD%GY9Wj5-6N`eom&(!b}r6nRwDGS7eAV1FV445 zYY7<7vyG;K9_h=9mvBpN2vIelhZbe` z*9Ou)m-umX794x|e!KI`)bPUi4;h{~?V9gUUEtTpwSvvZwhG7F z*`JNSTC0TrTB<%Co+Hh7CokE1=QUlbYwMR^#7R%L&v0Jnrq1Yg6rMV|&ou?L7yZq- zUJq9k*Yn31W0~BqY@MC!ic4fa!?r)HoLvp;J}dY(H2=;{(wR&w%+ENy$wm-2@%(~xTcb#tuO^bX5G@N&sJoQgOH9qlC(Rz{#c>G@=^ zZd$P*Uw?>2nu;9W^A$c*?Ah+{SuXpoqQt>lyL_th zoFO3fcRpGwb;izg>_Zjsza#07(@3jj{yJx-Dn}WG& zLYVy6scYrA65h$LOO?(HAUSg1_kS}*v)I&LIJu#NwJN-hFV9{NDM6tA0E3Jb%I)(c z6lgcPC|7GK&Uzk+;}!N@*tSNw){^q4D7lHZLu}Rot(98C%D$SJn*vpSZJB|%XsQFa zQ;E@xz{&7l-{KG?u_b$)z#SzsIJT0yb9zNcEwFI`Lx1~mOs~R&ubNITTW?;{LGxWz zzb9wN5rQSz16(0H#0<36rw06v{JhM3{Ps6Xg}PyAbDm2q#FxRcKlKRjI@OzJT<&U= zhx&;iJ#?q>t!46fsLW zngw+gX-~(eU`HpzKc9(b*3~7yO@XO=YXMR;%M1> zw^oy&+}%BugVi8ankGH@z3}+26l4}kTjCZA<IC9@4&u z>=czg!p8j2S}iBbHwRVan#FiSX3hE5rV6cXX;vtB4?Hp1Wm#h^w#6+#&M7zPXBzY~ zy~Tp(7A)?y6unThrTF}1q8d%;G*dW!T6vk-a$Aulp;EfIXJqn?@U&Oo4(|WQ-g|~M zm3?oZI2Hy39YsVzK&44ndPhO&5L)O(l$Jo~y;?y*K)SThTLMH%LI)L*PJmFPgOt#F zhrr#;IP?2(zueFF`JV@PG&yJQefD1EUGG|FpDDwhTSeHmo+Luha-IkYU%lN$utL>u z@8Qv{61-JkqmAgx7Lc(%IX>wNTo4K08P+fg)euoQVTY;UHK$o#W4WI4xai$_dY0PFoZPdwAmrrM%>IV zEL2YtL1+WX_4ck*QK{-b>z;16sP4i99$~-JR;EjTukUA6u}dM%xKW%%iwGhiC?5zl z>KoKZv-F{0P~=yBXZ=*XjjxNEVvDXea;8N{ zjn?NarT87T*mjRQ`U*WS*VdYyXvsr`tKqoWh-3_f6^&4(Ur7;zPm0X_qqB*E_Te%D z2P{<%bM8j>eO?ybbPFrO)R@tG)v`?xu#BQZk~X4{+aYMsAjb0cV?}h7uy7Qym7g5f$6U_d|azu z(0qh_f`?%jY?tzW*i^rx1^TT98Fvvw0Thj1H^vbCn-?!u_bp$VvKSSp&p-x8@{m6{~cC zx1*n1$P2`TBGE>o?;H8~&`k6L#-bI?5^bDBOHXwTV0K zT|SpK+kgw`kv>LN$=d-Lbd4&~#y3%0=E)h3w8$qc&};Xi#&_&=*Y7=<(o$!ZGUd#^ zwthEckA?E!)jWj*r{w7J(EOp^?ECPY$JJ89I(7w69Bff@>*FoK+MC&kA+F1@Xfe%# zcXZtqJ3@BtW38+SyD#HmyK^$(Yg#)&|eVbUzeaQAvQZPQ?xaeo8h8_JSRTtsy@L}Q1 
z0S%7r&+oU5{K7bvN7LN-laHGGOX-nAX|>LfXc;}sU@b|cU9A}_l)pYLd(=rA5N1%& z=-Qy&W?g8w3Y%=Q@jT2fNr=B8TE4aRU1d;;x8@E{BLnt;+|Gqg?2<&lNL&Zp%e88O zn7YSOH{0){ep$JL-(HhI?T(V3@%;MPnqSE?!{KDzrLa;d@RKOXK8)Tdt0d_<`gku| zJ@VJPth_`I5f9kwBEi;~<%JGC(z39&U$scVa6oJBZjm`U$hsv7XAsJlW>W(XNMV1Q zHFuF4dsM0FUB7jWk5Av{bpwLVqdHY93@$6BjLaB1il&7(xAZNJk=%y9y+svg&$*(@ zT-wf?s7e-WC`nY<4Gr#xOfTTt28|*XTc=FD9BLOrvghJ1AW%gE-?|gx$QdkqmQ`5`aXfid`d~3yb_G&T`GZeCd;>vd6Xhi@ z>5+^u$ycvV9Meh4Iy3cSd#vO9Vev%@4}Y0dg)sVT3L5r%H>f-gOacPxhK7d9yf>{$ zla~WEA3b_mRPl>)&|~1_-SWlsQnxFKeFBpkKYNN34P>;q%?gA#SMC62|eQ-`$$AKho*lVzwrYL zppe(7w6xNc;&==d4iG@+0Zw0-D1ObgL6_fUo)=O>Iha!j(KyHg>U)X|81+Z)47kjA z9s#2@5wmDbpTpG<8aIt-c+4ydi!ea%X(usv(H-Pq@Vj0_Q%JYr;{oCBZ>Dav+Nb)xoUgN4AI6$fq zdf${#40%7zRiF#&AB(H4a%gtKVWpYpV5idI%(lE4cS+%LtB@L_n8!op3@1LGqsdJO z=VtFAC%Ne^$NblLIu^33`*nohP9eC-XLsQ(s_-Zu>7_cfy(EFCaSX9eo@q1lAyATj z5vR7c9Rtkm%*S#?lo6};eO1g3;5R^C-g+#b|D>GXZgOsA)N^QOxtAeHWNw}L$WuD- zOW()+%Zt^4-r_5NdFJ=pwoFB!ELKL1wkv)5j`p;8>xxv&Tr)+a>U2X>?X|srxD+1s zWFgJwD&Jb8cY;3mOOK|_l#jFdyCi<=t{pnoevPY*TnUxhWKOZ*JH;{0GKiR66?#mqdFpONcVRLeG0?Sv0sb{JJURVB(` z4KC*P<&A`*yib^xVIip5=v@?ULym8HD+ixaSkk1QKfk&t%pXt=x08BeykiS_o=HC@ zR+l+&KIpx0AXaX-D5)?QSD-seO^%%=YzkGj>$n9*<7z(}4gXRxR}e*MRk_Qi6stu9 zz+W#gjTcyL+Mwr=jVMOONTZ!R=fewyzLNg!u8uGCvj<%{T6`@^`~9&Rs=NH!>UoCh z!whBnu?pTE2gQr8vSrd`{eW$+|#+vNT>iq|+-n78Tdtd1AT z|LNQ7()t)-{8-`O0HSA%wrB3*jq;XJ`RAmr~F-*^{zl2`td^_qun%kI}Rw`d<5cl~Ymb%)gx^W}V#*_H=?|w?>2sThpceYs205h9n6R$P5A8u1%Is!eZFLb^#>_G~ zkYjA;b8WjB>0EWLB=a}iw;aX|4G-h}e-ZB8yVte1KIOhP@km$%>E(_uGOh0I?ak6R z_lLfJEH~8@F_8Ai)K^m@yT`X_yEo@uW!&a_A}~QAe>_1k8ln^AZQyR*wx)u6^vVDb zEVhPlW8Dt4`@-@G!HNL}-gQ#67bu~$`K=x^)(44#UtOElH>5-%aTcecmnB!is1X0e z#pco)x9uQSf*J(MARu%v@S}Z$p7C9hTFf1JfTobkFtcqb(+3lj;&IVq-|;JBLTK10 zdZ?7jra~v7HzZ%Yl6upBe{d*t*0z+Y;p1}&t}Qxv8*{m3b%Edjn-2e7D8uQoM-&n^BV#)(fv}}d^WL;^3z(0Re&R05xN1JM`2h(R+wFBZj*-OvN;T_S z#7pM~%a%XXA-4(tnPbW?lg?Itlu%c{y>$K-0?*f-7eX1!9EeU9Q{9x^Wt3)4O~(hU z!lZXIhoPgx9PYlKL>=DuI|P`uIflTubAiRdf2@aBtsK{Dk2c_Khh2B=jB-4ltzO_Y zuF&KbQDhUgTc#uCt*9>cvgYqs2J4zv8=yT^LqryeC5iJZxt$v&!{r08`EN$jZbt?T zbwZ|_GIerE##uSJai}7k8G0=0nqv=25;WmDpI^QlF`n%$%F{PQipoeT`_=DVb(k9# z)mpX}jCMacqW6~6vm1<$gEbP=x#~#|3VokeNj<@mUe43&Rm%-SXo|cForMTgr8wKcu)s*+c2I=DGySImF)oXH~smZyGvg#+=y<5}IIt;fy((f?W^ExJF zRtMtrwRMLBH8I}iCb>DM<3_j&kvgI95Y=wRi2Ri*Z&cvZ*E@As1<$~WhUGxKTSxV> zL*bBf&$Az+Xd5#l^r5VXsgG4`TtJCg=&E6nFy^r^Y_i6!wv`YH6+SL#b4PhDN*Z5(0Z>SNrV7mIjS>v?XST*BVX{hslP~{eCM{yP1D4tGLmQy-x>q1ds0M zS%z?krElUE_5$MIB+hPK2oeV2BbbIR=$_15`Ph>@7z+}MT5L$b+;s#RW5mr(3TvG? z4;C^}=)!>zb47yP!Lt-=*{+Y5o|qft3=M~4l%qsXvXe6?SgErZp5*DQcb0MB4b=x zVC({l-EN-$cq(SKaUQ#Eggee^%5xZnG@wY2UegDBevEy+17B|NL?2IuVz;=Jrq5nPmn~qd(d<>q+{^~3;DC{|*#+P2%aBoY3+|f#jzixE z%mAu9dpRO?((bBbFA9Pud<%{UR7aZTh==w>nv?oTj-qB2lfKd|`wcV#%gqG)-sWO{oQ?Gg@2AEcDWbD5U*43|8 zqt;ShS2yw?NR4h26i)GUwNFR&CfJmOZP~1l4eJT(TVJ(tToEWdLJy)s90pJrzU~s{ z0w@7SAaV8aC-ONyuDy<5KBjqZQm78&CX70KRGJewbyNCeYDM1beDLwME95dcm}93}kvg1C_0AHZs2m4xfUFD@y6-OTZZrL~_nabo? 
z8Pe4hjeS!}u^ay300Qt<0bXq5aPQC<3M1^NzHe5C{JHT4!0tI38rgKi*iryWRbFS) zf0EV2oXorLBkECip$Gk{<2{NBfse~EwWj)dCi&xaC&v3csi&8nkd9C0FAn%UlJ%Pj zJ{XBjCCJttL~v2ZqQr*^&U`)N!mCdn&e%0F>582V6RtwEBTg}N5xrL|9Bqen9#9w+ zpYJp=8!Z4)ODeUIB>X+rb{L6B6nC?ZkJzr>4a~)PC3!H973R#ccifWDfz+9!B^DzB*YV<@CFp=A_ z1B;5r0a;dp@$-W9dmoOi560(mF~_*A_aXZwezQCFBZ2q2}zyGYC^g3?3^Qa`?7%Sey!Q%;AHs|lA zIT>V!juoA3htekMcvZDky3u>{XEPXAi30Nd%g?X6Afi2S)!#A}SR9jS2Mkb+#6*-Ha~ zpKqRMwB(|MBEpO_1lngx}m^MFo zxn=3@!DSlshsVTsi4R-z{x}fiTA~%@>ONX#)oUaWEOUDO;~+Wh_MW>Fuf777OyZVg zvT6PJPMc6qZn3(tJY^67k)@iDIxFnNPgwA%k zEIKT3$A8)DkkfxV3G1`s>-w|hhH1$@2O4YhwUw|%lc^uFI}!Ap>>#3cv23A|o;Toq z*G;nN&My6c9aaAsx$p-&rAFIJHu%MYeHLrm?sqqh-0s9jFj*y9m;1!yt&jHB2h+T! znch)J=t--2bV?;&RN(IlYjDtgV)oW#xZ^@&l+bOl z+tQr&S@PW<`C03y@DA)YH{(Dy!|*<7s9f#o!l!VRKiE{*qdC4A$ODqJO@J3_2L z5yY}L@y``JKT3FL7}WC;YjN)vMAk04P%*Nb^FyaS)%>S$Kvurgzuorv+PY#7C--qP)iW>xnKC*J;*7hdYOB>8(4c)rld_o~Dnue?0-9e@kRn z9znghg&{{_=Ys2dUGXW$%QohpqKYPLNAtEJs}l7WOH`r0<2J%gvDe14{SQ0M%DW>a zXE57^^?7mJuz{gN_wiEV&z&12g0bCJS2Uv;@@Q6t*B51?x{~nY9530Hv6k@BvOL_j zrDnW(ey<}Rf^E^bVx+tlm!MY}QCCB?jfcx1yn4S9DvRuoJ#5^e}+fs}UXk(!%+(flM0*SPuP#E5|2`Ehc?iM3H3xFu;U#Me*5sx@N%X=^ zpgHoh?Qcr%$OjDb%Ljp!nmR-`Df|d0%b?6aZlDfoGqop0DfzlsgY0>bW?*xr7;rBu(bU#N%95^1_d&KO+MVycy}8 zb60A-O^6Gj;goc+9}(pX`~4!dB>}Y_0SB8XnSCYMdR5W-1B%*;BOV5sYjN<0n%Wr; zk;5q*;wI{(q~Nl=NfFXkp{Y^%(qf{hRW=+Sc%rrSgtxwXND-+eriPy`sIgo;1H@jY?)5|`sX_Y4ig!vlg@i06?nwUo>gbQ2dfpj9_zHpV;XxGXh z(w;j-%4$I@=t&Ul>DWW40vB|G))Gy}C;DFZZng7R+N(#cn={$Db%mYENmf}6gK965 zDdq(&+}OqVF2=Ul98M1+FPtM+QW;3OApqn${|j%kMnaMR$Og<)xVE+QXU3dUEji-? zvKp&IkWj$aZ-T*{QMxjai#9o_-)#@^?nZ}0 z^N^j~RWHoze`^64CEfbH=XO0{`e;_qtRL>XeT83d9<**q5|Sua3^9jwIJ0)?Od5;{ z6y8-5F#uaX## zV!Djvg@7cmxU~eCOr_*d@h$2OvCC`(Fnh*Q^-SsvaUqRV#dRLAj(2x+ofy7m$o*#RFxXl8SNdUL-Q(ErbA=~^tcWl|wUi>)OEuBnp( zU@xZ*!ugim=zK)8+JZ{!jx2G%ho+1(J4Pkjviall9MP1v&6yVL=+ZU&)13LTz53%O zzR9+?Rz_TFlrd^-v1)9q~MIu4;aSyiJAi!tt4?} zVmK#-direL3#ywS(I)1L1h8I!O8%_eS&%Zn)u!xt>E7Q8qj$)YdW%~++u=sgtGcHm zOL%+APlIarOCz6J>ofqXWHQ|MVuCCdXc9z%%%)rwFIB6xvUNUDbZ;cLGqqMzl-bd0 zaC^%KVK3eB-l%+GxV*jh$hR&tRi+v$IX;%_@9P0;3SHbL;oAooWe4+*>|`;qZFcxN z@rj$FH`)f051D#0zDE5zESogmg?C62%_PA~ILOEx2{AD% z?^jfX8G6{Lkky3^3Z66G^XuQf?!Si2tBS;|2znqyjT`RB15q0^ds<3o-ceetF3E6g zU1U64|Cz2+FVYevq?Rzq^`U+F>dveJpD7( zq1~F5TAT`v^?1&zW5;sGxxc5mlb5i$R8@2oP#@JY%~m*~l&!78dUa#pZ$ku6^kTAiuZU-zFeS%$K73*B)3i{9z~GJH4N&5 z=_oYZ7pi#r8Q{txn&l12W1XT*kWB@Ir*3!N((H!>GMP>4-u0U?FZdY^Qb!mN&CN&* zL6EQMM3g~laVvFk&l%*&5kr&`kHYwv09w+GG>??N;H0r5=D)+7+HX_sbI^L;XuX~; z`OwWFKWEG>7yR1n_D}td1iAgQ8EjhWVcEye2U=H5fnuIY)IBvq%#;{F_y`0dq-m;z zXEB5POG4M6kh(XLBNac83m3^UlUh%v;Nc}i*;PkHB5Q=Hb*}J79sgp8`mpuGt7wL6 zykh{AmxcMI&Tw(c)SFM+aK zCd}U+y|1C0{rBL>$fWP+luvrk-&%eKk+G2@>T>nLhW1@L?&GrtI0C}wn{DWdgmE)+DAQ|+$!S-u4%zGj z%gnXG4gSj2X^Pkuh3K|?jJ&kWM(cHc9%)U`a!wPgT~AAjC)L=c)btHeaE#AO>bYC; z@joRmalf5FFOOV-u*Z+P0YZ~rytnnY>6HMGUAlLX9}oJO7wV*^&csPE&^ofJ7M{01;s$9_wx zxuOGt8akJq6FA$rb?TD%Vn`EfmsO&`xFu442=Ib(BT$S8GMIzN>Gti;8rc%tEMOW> z>m0aq0L(hAmRfo%nR0whfjAT$4yqvPk^pLJm9qkQ{WzFa!<-?s)UsLA+sxZT=g~it zo+di~bNd$M+i7nk7&~~zM5BS&p5@AOJ=)5U^1^FC)&L%v^!jp>6 z)#b_tKmGlBSS;3xBSz41G*XQ{R*juDA~#ruu}KSvkZy6e{zhB$_Rz$jfQGJ!5{J)Q zNfbzUOsEFuaxPX5*$-)2!R+$mhF8P@sbDDotx*av@#?Qc)x+TowcKU`aaCDh6<|gvC zMJ-l^G(LknI_mrS`sU_a&p(DL&iVml6Fy~mf<;Ig6S|qVS||2n7{-+b9wy(q8$5by z`?F}=Zv|1c%yq4gKhy2~T(S{eTf3%qOZvY_{Zp}^EhraBC=qZo3GB^L3?RifD+D;ImewOfWFO>$dyyS5@%?X&Q_3Wvca#Fm`(` zQa%;$;$#h$d7$x6YTkvHF>1Xu?wT(
_xMTy>DUnsgD|Vj59s`5^N{v{?WJ zxO9uW7O>NfhY(%3+tV9AV4_q)F+~&ispMHg6l~=N->4R=fwLycaJ9!d&x$_{4Gm+r zw?%z@#KEUu*!t{V6VqrwnzQe1A1J%*^r}*;4T60FNdKn5HK#vXH&gbdPETy?|Lhz( z<|tEewj;Rj^Li}O?9}uAJ#25y2`GDhm>z6L5A#U?2h!fPhoIn~+1*oiboI0L(O`tQOe>^E^ce5$IBYzrhBlNX{o%XXnR9593T*X`kCyDhf z)(>(LO0H3!zeah%ayZWH5T7Dl%LBfeh${B=Ib-vy)GuX>S^It#;V=%C&5P-7q5eA+!YZgcQePl$seniF$c zWlsq_Dmiwu?S0b}P`Q5;8DBU4z)OURg+am?M2mQiCg*9faGthje=z)gy@|)pvTPpM zuVaYN_JwUtk5El72!9OWKkMHM24`*$;+l0YlWtqx!oose7FLBON3^(NLG?%!*mE63 zK-c~}H@$mkH7r8Y^>=#{C0Zso`U{!sigV*%s#I&ehE{g1+sXp+>TW_bw|OM zne~i~hi{%GmbR!VF-v-=oj-qmLn@-h=I^P;>jiG9u6B0la3*OKc)M2gb3o~0 z%gfQ(*-xUtSm*rzwRikmhwq7&0{PFI!t=3MVViVUg{HOrpT*AsxhF0S-u?IYv86zg z@X}z91{NF+gK7Et9zLl~R@=K3+W2Sc?-khcCgI>Lu`0mh&aF*0h^M;Aq!{qz$Om7_ z&&yK^ve2x2DU)6+U6i?u7z&)wgz%p?b(>QH0}S2S68O85hs*<**F`&d*UomHnZ2;HFfrHYX#PM#TuG+N?gh z1Q{596RZkFs`Fs=&cgdD@GD*WTk5t1NF`I^V61c9nIHm)KPs78_lhtT`F+DClq3PK z@Wy8n9!mxhrpMixiu@o$^Y51`B)-$xfFlr4AOtPzokX#GT@_wl-uSZFv%gQ`iw&SX z9T0eh5^&=)(_X~}YNyGsG8;V7fxHWd5_tWW%^9)Zr*B;cZOV8o{XD)*8_9xEgHrzy6uu&_~y z03muduYVVrxP_h&XNou#oZbD1TjCjo#^np;^`t`9792u7MFI*~NT1c(-K|CQ6Qk9- zLx*?{)N}AS6~z2^hYuYIj;{a)&o zdisTId8}4a^2#XfIsETwIYZGM*}Q~IN=z)XYP-%T<@H%d2}s`GO~k6eCceC+ zm-Hye=*}DSCNeULyJV?0IRMWskRXL||F3t;-K{_PVl4#*XJkMhJW=tN(-C6<;hAMD|c0Fn}9;Rf^n zeTQ91+o7ni@JWXvASvMfjmaBa6|BvN8G2%RK0@%apxgvaF^z93l8x$8*qmj8s;mp zX80fOfV`DIh$dRw+7>qw|Gk)hUi7Q0=O1G*LJq7aOL=g2+-r)ucJEgxP_kt=iMA3U zmuV$Xq^=HRZ?*oU?sTo8y1IGbEe5Lbz%%^M4snw10z;o^JXtipx+=6T6>#LmDB-4L zjI-C`x!x8qLfnLbeT`f3_yMpUWzU-~u|+G##>aEtx?~=M{ZC#793_zatP^36?joSmC*}7fZ*h{iyl_0KKz^Abh^$LkgmI=~ zgbZdA_^N$BnOFa~K{7Ibjrw`7W;H1Q^O0sMaZ_R?JfRc+&QxlT<0~7^(H21J*bnOg zv>s6if`B&XTa`ts_5Z4jOs;u0G9mvo6uA*}O8l711Afio#voZT?7v>r>#2~Y1F?9b z4tNZLE4IoqKJ{F#RwUuvv9^do6Dy+SCGn?|C(B5)q!(WD3_#Y?%SkbJ2 zct4MVVJB1l7R!w&U5ES2zr9Xc{ha65QnzOd;(z_w?0gIOII!!sNm-jJ>s5d%;z8fH z|9bjwG*0H9I7+xUM*$|bgvT413sxP7&(4lVwl=3>Tx_ZGG%(2E!^d~;4C6p_23%c& z$G;{&ce`!EPz54w!~RiF8lUjj5N`P*Qc1>iE;->d`+w0+{Pr*qkL~1nZ;*jH73*K? zULG5RB?U{c>btQjR^tGIfuCDVk>jiS_iw~3R*9$N{qEw$Sl;*ryf!iHg5WB%w2v-e znFjAm(8$=x1WM%VaveHKG3iS&#b+%&cE5kaQvT6zkt8Y9UqWWnOqPzX##`I~d}o{$ zCw&+feV9s={cxO>;oyWA4b$JIRhjMTdwX?7;9-LX6MU%I|C1XIl8jeekN;`FzA2TE z!#Rwb5eorDabXA}C{3|l`a%+Aj)*IpkQZmKI z=K%c=50}z(ynmcQ*ym|mxF!MQY{}SKI}s#W|2`fD%B_t&Vt4E8MOc0#uAIZ;5T2CD z6#orJ^cN#+vruaJ{{)cMczyhy(LuiBAKU;9@o$!_QViTtfr zWa3ZO{)N`fod0RjLyar{!iWEB`gD%}Z$eII;s1+-oXGlmGBZNGS4k&R&z3GVYWz`n zc0Hz&Ch)@r#&cAE%7x@61Z}f?;`=jA{YAgfwb#YBA8Y)niB$2rxA7O-Cm$LXPvwk1 zUb<&k@ZMkNpw@k}mR77CZ0<~5%W%{9jriS!>h*R~6IIlkb60=fPg@-@GJIye)9$%5 zHh4IcTDjwH_zhr|vYp0C{sz-O$VRTM%=Doo2QK$jW=R zaZLUD0>w3|i5$VM`L2p-#c!q;$li41juCuHXF+FY+*;|(1M)dIa^?<4SsF|!!IyR? 
zhBPZz$tH5X-uM@-lIi?U$JG9(h4*Ry)6*}y|7r67olN%si`jU=Em>(AD#)zU^X^PY zRON+^)X)>XROjzUM*BlP z0nx^62`s{v50@H=lD~vlVIeBvqjSF+(sm7ac>+wU#|`q%Ws-6$Rda3M2x=&oqNi(} z``kb3LrWD=^_y3$;;I@yY%wcZvc%?ZdIp+R5^@RAv9I z1sIjIETLR*s9Ro@HOf+!|J57VezgyI$y1o@g(H`0#YM6gb}oT&A)YqM$M6}dgaQGK zC1btTs%&aOo8|Ml`$;B(bX?EhnMOti-KCZ^nFC71g9+R7NJ z{#pP1cxsD!Q2jin&ASV$FTN%+g-H^kYP6m)>2GROmRq80 zvZJ@Vmf-umQ5pmA#-+P#F*YsT3#jhIvWKzC7g+;iOHu|mW|s%z%!j^dlhhX{JYs8z zv5-D4biu%fy*(%W-f!fwGbow)XpOl18i$uxqrRD5A)82g$Al-VyIFNZpi1b_(;#&J z!hDZ01Zh zZXC=lE%+#0W}QAhL2ZsIP3JB-EBEslRf@~vGc_mQ#PNKKZ*`uPBMUo&FNq&q9=Lr- z?GS6SBIb4e%5~)+RYRQ=0oMJUqTT|!#bIWJ4AoNRG9gw|2%EU&s;r(NO|k{&*J@bF z0R#izEDo4T)r1FD-%=*TJea;fcY$oL|2BX=W&Gdxe7B%Hp7H9UW#a@6E3QHBg+#M& zpR?in8O0^WtSKW!HBW{ezop-G2-tbBj~dww$F+No{`9~7-FQEwRRc#C`Exq)q-tj= zzjxum4ucWh3*(eK)g*X0gLI#u#J4RtA917U>!r^(7Iu5-J3ffA?z^w;OBG7{Y*N!Y z6Be&yZYGU&AE&%+t&pWMy_>kOy^#Y?K@cri_~hS8j`tze1%;Z2I-`M(D*$8v0%v$N z#yF9CpwEQ6F$>l)RHo|CQ7dzV^3OKG5{pQ&_MB~4CzJaIYjwp2-k2-WQr( zANVOJ6iQF*!}_HHgm;y1xO9vaj7*B&^AegDyV2c|e-rH@oR^g4>dxikk#Z@~3E3Dn zHB8EAjR>x$B>Pg%_*L+QW6kR}=I+{^u4&QYXy}GnY)G87T~D53uPmQlcd;|Q0K4%M zUw*S3Z=hP=-c^J~)Bd1__PpWb`srKT6yXSYDZufe_b;1*j8)1y1;f%|BN7K18Y60` z+;HI@yNHO;q(wNThcDk)$q>*6l?bIu0)UWWh@SHrt10_(~@|oU0B1IxP|9B zB<-LXaNnOrvvgiJ!O6St>DF#N2(uM&Hviz7r;X%_tXBT|o{Zy7zm9#f!iR*6sLS%h zw`Z=X`wa57U={|?%C$9d=FY+?%enSN_f;Yyd#9m&j;^{p$~NW={1ElXS?e6z?=Qp} zf39k)jgW=i5PTA9?top9tRUIZQA5>A{VQx8u_|EBRNv=V{co>N0Qic!LhUE z*fwg_Ph$o`zK1TNzj4glMhY|u84n~34_~s@7(m;5d)Nlns&?-fj7r*Pv3KxjHWtUJ z?s*bzn+KwGesCUQC7?C;A%n6PHRNv6FrbfFm~=GetsHd7omB5ahf^8vWq2jbY#554~A*3)A{}VNpUtKXLCF zP+8{J^7_YLPCd10w1Rakoik+Hm*wdxfMQc*Q6|vc> zuMjURHg$b#9!Do)X@jN_`pf;VCd56JoKk*#m(8H?PIny>hper0FkehjeAXvB|7*V_ zI~?g+)V^ud*@$q{FVHrF?+0w7i=+&#fUIE+77Qw9W%fU5Uk$!6jM*`0q`HBMX=md) z{5)7CWtiQVz_Oh=KRknoIx@~RNq*=JOJT*Ju1n`+S){kHE=SllcH?B8a105$pl#(! zJvKl60Ue03ZESB*QKrK2Kl>HL#p#-jVvwj~Izv`|N=b}+z1|q>KCE^TZvHrp=?^i- z=wOW(=GD69wD_nOCgX{<%SK0*PA<2VOR}f!+B)U3gM$+0#CODNi>)%Pg%*n~86U_u z8gpTJIJJMs<89l*<+7X3GjB5Uc4fu17}d1#>yKG450;zznjzE4HK4XnW|h=^S{VH@ z8uJS}R_AJkzqb5EI;mFRPW5~Ix~ZcN$HL?dIjpa1Mvvb&jL_ikX$t3|T^++@9*%e8 zu#mZzf>@Ny0eF+Pf0bTnQ|`UZ{o0c7jsaXE$;XlCW3}ypw(owf(lSsm&5j<2tLy}_H>~J>Tl-qVAmhD&(!Ia^ zAp65j)K?l}zJ0yELV?cf)m6@voxB~ack2;in=SoiZjp+k|N097J^OCvU1kD@;&^wE zmiBUvcjBIC8i*&e(wx73DH-XX}NCh2)8Rqrzt6kLCv32HJ z?DKqm_^s~SgxwBadp+D!S7KXW%ASc|e>U&hOwCdpLaq+CER&Jx>zVJR@ao7BqwAp( z;^JX))b=1s&+a60AhwUAVtM=sfioMKip(Hy6Xt?a_)~dD44`-%LuOZUm_^nQM81@a znDtHFS^tNI{LqD3(yFa7QZ-EYd@GZn8TZ&l#`G|+j-NHEtF;pH8uC$%MX5YPsE+#Z z`1Gd?R-08qqXZT+S6pr8Xp;54uXA6V*SN$25-{Q{1yM&X^-_?~UAJEb{VmT?2;Dla zU5CEpLpI@*K62wz?-1F_Q_$CxtI={8oArqbA>*Q(#C$p6;>4hkY65z!m;4y%@}1+} z9X@Ti6FJ^uN;^i#FUv)whRrh*O1dC@TQSRg>@r$+O&1r!H%~p%?b;bT2)1bAtv=Cl z4Z@ym*GhBhCwVUQAge-&-=*73ON>|zm*_>^lZ~iWTr%{m$f>DQ&GHHznfo|aGKZuC zD>_&Bv^&2WqALEHQ{b>2>IpMCwsm5>nW;y<`TQus3i9{Rw_PLix zvjmsmZV|enUt5Gp!TqrQ9?HHzSf8}K!eV#U*M}fq5Ah)eb@Ter8(8~g1!!HdS`pJJsyi&QmgjbJ<$Uh9hBJc9aQl=Nj-}?J zb#c{NMaubf{Ff3J$~# z!Y78%ZPOfKnk3U8_tKXB>f8*TuiG|TpIgR;`FzWke_2BGze?QIZwpxYQ=_2FCVSRw zFJpqzPjWMcq@Cg?+pcNhT%^tH(rjJY!jxK2Ks-r%%TlnhVmhdRDdx5hR1Ol>j*PMt z6VBaux1(EE)KSl`Yq7pKRO*V20(O{5Sf)fPP<*4vV|DL!h^T$HB%u}Vh*R&{sbrDnJXNdw=r)J(2w+uCK*+nSS zV$)f@CWkmoFXlFx4>id2WPhlks$3~=8|wJjE&0eWA=3)G2d&SPBbTGYC|pij>P|P? 
zG0nP{BwfaDAlYwM-%|K0!4>M>qbWD0K&#>2a`Sm~VYxg-1b^2oQE+;#8Y*IgRM8MI z?tG^mb4NMIB-8`88z-CRgs6+-H81Rt0g)QrM>-_aaw29+MVs)MhlU>R6 z>qKtS9mO>%8S9snsUh)`Xr1noIBAoxJ)1H=l#W8QB5mk(^n#q^cYQQ1KIWc_N4m8T zgXCUf#A`&cSWNL<(VrS{7qn&PGpOgZ!zZ+KPCy6dhr{bKC!!X~YMgJGuY>x+LKG|d zL-Rz%;Ezvowe^AVT%wLt$W4!xf&ScAM5$Hf>YQcq9FlPCPzfxPGVr9zW0i*r$&c?_ z>@Y?6oge?|+9_OmgMO=&r;^E~^$Mc|htZ|Hx!t}EVP%}6Yiv^Q!SJg;#kG;$$q&|D z9vfTT&=dcG*F1O~G+n6Yt)6EAD{j4xh!-vk@8uP!VBW&D^nc~QU4)aSfl=2!QUl=~ z6u!B75$oe!r|0NpTAd1s9=*GN8Jf4iKTxtkMpk|mIMsJdh0|T{eD$`O)1G%DB^?OL0;(Y_`z;${!xW_6(PHr#R$80czIxgDasFv8XH%jvT}#- z{pI_)Jd}#f5~J1sQE9$R5$RzaTpVtc*|TI_D+bZeFX&-0OjbFDp>Jb`pHeeFRZ=&r zg}SDZ+@A%Z3mOtDG~1ZURRe%eD(wC08kH-g7%HpMzmVM`YWqzBeRZS%9!uxPX`SUE zL$19wmEv^$VG(G{FsrzB&#Oaq*Xz|ifmfDVrUSNVYX|jmi+6A*v0EB_6vfEYVoCx0t*s~vx#d@t6Ks8TpG zPhFH$Gom_>a4T_rK_R<*XigzQX*(v1U^9K#n!Isz`2V!`ok2}^UzjL%1w=$&K)^zg zAVj2hlqMjZ5DAc|fOL>9C5c!-q=SI;UXoCxL^=_X-b?6BAe4j-p(ku`cmMzIJNsp4 z-``{=lbQRwGxywc&pG#;=Q)AtkLcqDgdW?@0+W?G{@M80M3&@%VcVbuYs0~iTbf*K zSCo#ByNjbaxHE4AF+ip+)fb!!kXZ<4|8fJ%d;js?1mlLQ|FDa}z2Cg5iY7p6Y9I>U zAnf;0;H#qCZ&!p4Lj~VTw(}Ec`b_*1y#_rL!AHnX^5zP;BZ$d`R$7K0x+Gb<2X?Gz z4t6m_*{uMcV{4f)A2Z^jWhmD*hb`~gf-w)t)g%jDKehe~~Vd3qkMw+e5DnPxp$MoKbJegk3BS1FSG#ZD*FzRSC zW%=ieo4p&FpGA4>bw{c+HjQmIk~U*4$dD^0iC$|3ig_Pkn#5N_^0lV8G?0Yp!xp*2 z-RL2&Ydm~z z8knG6)ne@a-mFuxy%JLJ94f3EZOIXX8P^Awx^$A1mnXr3y3twR5LFA+j`9uZZ>`^? zdf>d$8kF%W?j-i3W-!7>j(!RP<&yB;M=q>F*p}n6y8{?@N2Q9(zez@#jjbxO%jtKd~_Ja)fR9U z$1#37G^P|8v?8@GYY}~MG$FL!C_ahe! z#j{YQ1?^pn=E1Rdd(529N!03pG)o>t%T&VJ_4_!^vWwZ}8yFghuQrG2F^$}YeNeYf zEDK?^3*hj1lwzKWPB>h-smUTr9J>t!T=0yWB3`*8IH!!NVWD< z^xz(rOzSA#*8x)O<=8UamAJHD&HpyiaS7GJwK6c8Y5l$V+i|`ee{CN_D8uNyEToX3 z5rkHliQV016W63EQ|h3H1MeO!Bftxg6-EWpdYwCGT7$pKR0?Vtm63&LZE_59yClxp z+ikWFWerY@7oxbjTVL!7&K?eWHs`H+$7u)DY#y&gZ%Uv-fw6RH4BGsJec$z@wMjAk z-MY~O7u*p-BUWvn186?}_CB~>6eEuKU2Rmkys3*lO19PY z{4F#}D;SL@{X+Phf0vjnBB#AX(JIU6%9Mm+`hNW`E@>+o-Mo~{UaYqyB9ywe8<->O zyUrF<*8@$vk^Z@*ORSGU+B(*5%$!sUL0_(%#6mq!ILy` z4?ENiv99Ny8IO>O*O2*7gJFenFj|HZtHkEGv%E=~-rQh6eo}i^ea@~k=L+xdPfY>^ zrWwo@!2`vS<+3@w$d|}aJr&t*kt(x)f)De5;mCC3GF_x!cbY_s^%6zLF5VDg{zB{> zqz^c!-D&VE=N2f{4db(Ls`FOLn~6FpC-C-jA=q{-v9!E>wxDmO#+CPax%{nx0e#)_7Gp^1^=R{L&2EFq7S*JUT*)mlmdh+}%BML(Csu-xad)Jn*|bM+M6YTeIO-U- zQZoofh(jxGNRC!t$LWs)eCwV#;qHKU9$7Kq^lM|}04CcRj49C}R1?S~ly0`5rz(y_ zkW4xtWU#q5noptc3Li~C=XL0g!Rev96D^(}uG3W*f#Gn?GL_cJ>QW*4RP@k|)(L*- z4ijlkI5`)a>1q3Codd}qA!Jo~W3|w5-ZWKdWz^P5_MR05>Z z(`FYgi}Ba3IQqQlmjQ`C9o(pd?7Zb**+^K5wWk$poL+})T%qH9;VY&cz7y+SXlzCB z*j&C3E^y3$q-Hj{z2*&Gf8RIt=FwYB#?qCMRWAX!XSye@SVl^=x*Wple7-st;^?Ug z;eiu+TXC@P-L-G_=-IbHC2_uDJD&;w$*B(#d8NdZdpAh>OEGM_-8~8M>62E??buh* zhkcP;nJe{U9}{(!2CP@Ma$XnAMv^903ufuw zT-0(NS-ZrwtV5v~<*HxWS#9Ybue}H{hWNN_PN)icJbXakY#LK@U(vlhP8VxGwA2YS zRb03smowSp)^%0^R0Kg?D$1I)O%N4Kw@MqI)AtB9jRJFXZ#{kF9^T*rSGHan{3}d5 zT9&hidBUviAMRyG(qxr^G>w~2uc&3e<@vPv9isknUH}BoCB8&~V*#TBP5EmgC{1qW zW%W5lFTOq5Ttmtg4Q;1dY;LAun)!LKX&?@IR`hZf|7CIh$q2`z(}b|^nhSPVPeiN7 zrLCe45pkF(4m#vWe#=$(aqk5aa97>YK~RHw?>xB<7UdCfj-dj9oh;J<+it9(-&=Tx z!P4THuvLCeGHE3C#PK907wNeFhXsRGlR{Jj3P6}6OpC$3kqVxzh&9z{I*60 zy4mFH_IALS9W(wLlq{7}1Zi-1;YJId77a|E&W3*T+uc7{e+C(C$o`}yI`{pl%mU8+ z)$57T<^c|uu1KEaNe{?Tf_lmDS2+jxwgqO*`dydk6Ql`G0sHhO_NL8*+#DA43C zVX8CLJROgh8gd3-l@8n1k+KO z-x5#CA0E<<_+W=_PgG-iK5L~bR+^f_Ga|Nn6Gh9Kds#JR*FT9?x5~zEYPg0VpJX|^$F}bxe7Ct}M3|G*+Xq$-Z^2V39%;F|}C~>ti>mbYZ;6x-z z$n>TLY>=dcae~+NTaPiC-hEPjh#>T%ke-Dy7A-8AYtBy>(4e~6KA`52FRb?m6D10t zy`B2$Wy?*v^0LPkh9A7%iW%87q9GnO7vRX+ODO^oilyr_v`!3B*f*Z*I%x-Sn?~C& zF;<0tq=wlBf13#%ddn*FF3~g>+9|yN*&|^+ZG*|g87*(15Nn$931#5fCcZXraMqP$6SyZaOOD@> 
zz1lP2nv7vq`4hL)5&qun*$*hWTX*ox+H>CFEW=e9JeMr+-jlw#n|$pxJ9d<{g8p%^d{n;XTi1kZ zbJ1(=>&0rR>M6J?Y`J^&_*8QGeT^>~#>5B@nTqCujHOuu$1%%@hcP#I!QlCYCuFgG zk9o7w&3qk(Fi|7;K&E4G30_p_@gQH(^|+i6W^e`T@Jvq#wtbuW`xZzjHN!!uM>La5 zuUj~Gdn3iQzzlM?{EyxN`pA<{`OJF3Hhw>NC7cgCMXjKjBkL7>1qh2q+Lj>Pc}!6E z&bKnN-8q&n(xFUbM$_8XYzcR8{sFI`O#s0i_*j%sAozPIEN#3ELRc>BAN5w%oJ?R{ zhCeVLbI~)fFf2WW?({f=o9Js)>V0Kbw-YrlR^8wlw(@*SzhF?fm9lcx4MVeJ5lphVGfXuZ^ZqV^^kp zXOc@y#y3|5^>*&!qAVEpb<_p+hb(}rz?F*Zzj`|A=&yA9>grv(>GxH;;Y$SDtC zkzqpXFC!nNdaGia=<(8FntMIdC?&$CQ>m!=5M2 z6%P~Kn*`zd>5pc3S~0|IX^6gu(yFdseK6W4_Z(AqPsu|6Ttrqs7Vu44mt&`bu=YBx zfekqH+ggl2n=<)a23oibySZ2}%eDWc);omDQ+g(~XLQE%bxa(Ow^8iAAz>hVFt&DI zvtrI0;q4ihXXEw>O&Z_aJ9<=SA6+|jyOIn3q@Rz^>{ffphcO&ATcFLCoTeak;}%mt z`|JQc#USR;FLv2C*2efHR$=d98IxCOs|!-G@)6knPhK>j4T^H)uHlAw1vtQy*u`E( zN*u{R1EvbT5VXpi)C=US8C07T(mv|jCd4lFHJ2U=`dZsWb!44Io3Up{sS?nXMM_cm z2W+|Wu2d4ce6`+^^<}ZGPm-YHNZB&vKP634H2^VQ{E|g<(h!+`0Yx*XZ>X&)@81HmNDMEY{m=;STZSypQ5fMMjAO?>wT0@ zlh1@`OLo~(WtFw_4`U}U((G(H+48v+>RC(K|HcA@d$fow$3cF$FE@_W=nkPA8rnyp zrQo&H^|sxZJ7Y|oL`lE0K2g!Hx4brfP9uwybSXc~uhPLLGcYSXQ|NNJ8*z}oB>!OI1sbyo5x zbB`I1%GHi}?u=)fd0j?=vzoamUT~bRWrbE{mmfMoQQzKPr;Q@%+zu43HnCb;=}GGl z2P6$2+?tEa%~4U!c1Hi`66&^1-8N5L4nq(nAL!dg_r>&#MdZ2$&EEi%e%>j+zA`{h z|GxR~aR3jp;8ITEz>mw#v6q|4sX_)DhV>t`e4FdH566NpP|)TIP`C0bg6##aDXZsJ zq?Mk<8UrB?JR;8c*jHHI?q;98kOI71YB^P@Qu-w^6}aNs zR4wMHT4!J}5agy4f5TH?uq~NZ2?z>=ka73*IJC{F+GCk9AFWnaXUuMQcCf~8{ zR0(}F_q}Rhz%FlneDnBb(iJSmfl>Mv zqr6PDo@gD{ISk7Ro3~NqZa#F}Ly62#zY{{Cp8vY8QfI8V8gYod)j3fCUOVP3bIeR; zmm{*V3Tr#Lxwe^3(9l?!znPT>iq;r79^RsJg8apVB{Qa+n&$XCy`T7j2Bxetr-3A= z?>7AhK>6*z5X%>U{~#sbq5g}d%=s_c^54O9|L;iS!5?8dI>_P7c%0ojRdn2nPHEV0 zr@QoE)6b%!olb_}mkFChA_|YUigNy9|F#P zH282@bYC+C_eb~ynk&ituj2iCvi`L*bpPKP(!WNDR`OCmS_iLn}9esmCnHXs2ie#lueyQ)`7OUON}O zY)=OMdY68}T72^v-hdt*L91KO<{tF#uqR&tYftH=u67N5HHzbMR}rl8 z1$)%iVyY&+%U2J3rQ~aCo^ZpNS1&ogf*omWA+Hy#hPeO~2K^Vf1^bV3`RvAN-Sa%h z9Ek~#98peqQ4otSXe%lQ#esgTJ;wWO$6m7=#GA_3z*i2!J#xA%WOE10U& znX+U?NXDo3&*Lr)Y`cuMj-%E|YTx7mp=Z2_^TaBhxoTK6SL7o$NeEchq zsIFx{TL>x6O3-cIM@XCGF)=x>+x|!#XRJvzkCD*P z8vM!a2?A?<%&<5I*avqnRkp%`=8GLKSrrL=-(et>h=6&k2 z*B=+6q8Dxed~IyZ#m^}(L2b3j;~(qlal1`w=6qZ<+P2cS6Jo|Y)1Hg0MS#HtMNN=v z>6N0_4R-jW7iD~mL?do}cRhE5AM?3PHQT4!l^8f~@uKtt756@Gd>k9l3Mw74pG|ni zP%|r2tJ?pvwsPxU>ov^F4`Ynwhm3xD`pgAXg@mlb9H;(NCWromk=cP=O|q&AW^K6F z5fN6Gc?~~wcj;{?I!w~{wghO)x}0pK_c1{J)|Ds@MX7v6NhZu;mOY$cz&pj9T}Q9% z?>*4aCM*c*dUrvU>1_lsj<$PFum0!Z?SK0`kWrG@uP9g0dl~(tmPdDS-T$08ml@hc z5jgNN!0(}X5w|q}@4dgEqN3vNWOKnRC0o>f%Po2TNfi6`0*hnwF|&oF-Bgq@jO*w{ z8W-19{6=|@=Aau#-xwo2vr6#SUDQ&Df-_ZenBQb1b2KOj=-2f5f`=oyCujw9a9`N8 zJFNCOY_{GJy|13(n&MM2D^4@s(*gHRu2tn}7d%4s-dw{Hy6q^;8btKxLW4KU3)bRD zEmAM6dvTq6-u~4Lyy~#4u&Tfw>=9n8Te!UsP#Au2$6h8Me=ieH{xRl0=4dXytk`Ds zn00!pGyffbC^n?tzJ^%LUPs^%vK^kbt=1_=)|5z5bU~z$%PW_15E1s+CJ= z`Ym>kj7Km-HR!lnFU2gOq(+k;!nFG)NDcoCXKL=uG{};iP=qgSY8BtBn?CcU*UNs` zVhdzEr4})kzH(cFH6Gmdzf&s!WyFycwzquMfRQaiBB(2+VWS#edSs_{P0<+=%cqkvHJs# zQuEsOwg?~KDxP2ZJN3hSxAO&uL(r7cM_$1CdPm>S^nfk@pZnePNXl%J()_#6@9rL) z>uaZu&Tujk$P7;}sr;6^^|Z5vvBPxJY`W5F>va6Z+S5^|i7{NmJ0|Db3tAa}L$2$o z6#e|koqMh?mvF}Q^}?A)>SP8LjZ-EWr;F*&ojXUBl8&HamW#N9BTkoWGt3=r__Ys> z#c+OJiRO9^+gkX!ut4ws6UTnqS?98Xq9?>7ZNK!mX8F%FNowKj4u#a$#}3tGRAKmb zQ)-~eg43n$kKT*1jzD_-*u0+&qElDI+#OI#$8%NDoL`4v4ex1 zH6j~)dqZ;UbRc`z`L0w&ZmDAv<>{WaYxK+pw_iEfPH!>#g$0ysD*=PwAKDmADQ)KY z`{(I%$X1x2rmdav?7Z~fDcr#7H>(zpNK5r_8R6VcO2MaNy9>*{(xN@PVYFS%%JzqM zD!yo_`}qT{Sborfr2WTu9dLa}%kv+a28@TCBu9QqPA(kBvn^Wb-@a=LpDrrkc2~Ul zv_|<9<+ixq=!0b0?L_}m8vb>qyI4Q!K2b?0>9m2-?b?hF7l*msP8T1S*B)LDC5{hC zQEz<<=AHX_>E!b(^12!CO&yzhHsU9@^>_E2-UM7vs%P-Tep+?ESv5QPJ+bU2FXRnB 
zmyLyUSU-JC(2J)ZHP!N@px61Hw5#adN}svF)uQz2ide*1&bQr-JPFUPC5p7$^{<@| zh%iBJ`VPt7{1YEH#SvA!!MVuu9Pe`Y7W-- zfpFl4c}lz!0JE_y#3GG7u#FG2;Ok|Uw1~-pjIz(BP`w)uabG*8QwY-t7IFwb$0=u6 zp}<3^SR+!6y?OE<6M1AoM?*mUR)zp#crWVSQP+0Dn|&Aa#49PG@sf?A8Y~DoMJ=~7 zNyl?IKtd(#@`v*4a=?V8*|5k5Eb?$`iec;;&L|PjD!wE-B*7hWVwt@@7}J>PFA^~V z$GB+ppipC2*5FR|O4ofhY?ivW6;Ql<;dvmd;e%Hin;?v%2Zy|AF@@PA$}SwOEyQ|D zHBQ1j5h;c{D52a-pq8O<1uJ>xQFdDp{Fy94-nGy7@PY|)sWABRPKB$N#by|`Le&(k zwonH|iJ2M{NA&kl6H1(Si1;}TR*8)Zyse{g`X#0Y9uUJJa7c|p=J@Y?9$4v3H^EWc zr+7hc)_WS+o(kSYt^&C;?=9DK@7KAOR_kRI*F$Ubf0147XHCsf8Qj}~lYIM%b7I&M z-lu|F-7i|b)CG*<_wtRMLr8O`B9%qBg5`;Y zGRkaiz8XKe1j^B~2%8{xU##>&&V*@}^2XFv=PXb8fTZD1S4nKi6#u4Wn#zw|Jh>eW zPZD`qt?dn3j4Q@W7S5=zuIh;UbjC`!C~HAhl})F7-dW4n#SQLkGKk|g9ysuZ~ZUCwF}ez0o^Eq^3kZLYeba^3r0;dY<}z>F=**UM~;d*>^h z8IGdmvewN`u7mGZ3ZVT*_cZL{C~KtDJ}1e+_=E6yiE>Ty*)8rl@q@<}^fl&x%ty&> z!Yf}GQQsf)*{y8Nk)NUo;tKjERT_^gFTzl>kMm1nel^SS%4CgtS;y2CSK8loetq5K z((kRyQFE9#cYuf8*5czgV~JMUp}*%gcy0M<&l?X+3(#UnpKC zmo%uxxZ4*-?2g0Lq97Rg{f}ibOr!%8y0VI=+OR9&<;vjl1%9zjZH?^U6f>>9+9Wlg zgRAYq>jA+8AIa==*Jl=B*GzK6$zNRZ>s*UJQ(}9|2e{;eJU8=KL{NkPD zZ+QX;JBSxx*WVC$ysf)=b=*Y*+N`p#vArzF=P)=nh8$bUetU;FU zoE){x8;Lf0?VQ=!Iv0~r76D$hdydLvZahx<$`J*K%Q4!{CK3cb5h4l4fMW6nrDWU9oAscyqroQ@9C*H@iemfV(_xd?_=3$A=1?*n+PhaBIM=Bn z=ILQTk?-F0cD+7ntF;NL13{}dxoNAwX`KX-FMKb1p!*}Uyz%oxidG0)R?Ld~xsroj zZ$K)yVIAO8IHUc#(pzocpwN%u62?4-l5TC2c>p8=qEl5o^VZD{Fy*wWJYgv+Bw_3l z<0s0eB~IYqM4NS?rukw<1eUNNQSwpfXm*@I?=~Uo6}%HuD&#S$MfrjB3A4wO#5;%l zG-8v&)XsLxFxi)l*)gGdS*i#NLvv8$c&40`61yn_o{~?H#Cyt{7w0E^+ zog3M6A2qvGF5`K!AMStM(9R84JV$Z1k9Lf2jE7Dh+RsZTDtmIs7fB*QBzC{wg~cXy z-QW=Q;voo?7fH#dl@9q5ILBVlSCIG4GH|mQxMa?Gr{{0dYozN`R!hQ6_ntyFBv77K z&zzAaMr*E23wkQ??ncJ7=vLq)*i)jgqkg{V?1Re4rgI@NiM(fr#%1_m+1 zq|i6y_=R+n@-Z7^@k7i^r_kUM7*q-MvkPAt_0z6 z!ShwRrt#{AWY1);a4VS5Y;1b%xn@la)D9+6*jE=~U@m0FRZ?b=o)zy);x066L4Ut( zB5&(`NCP(v-QnFG>-!aQ$;$#W|8u4N{vaQ|Z~-)uYVazVu=uAzKy{So_`rk#23NqO z2(ruq9z5-OtQ((gy|cX4^V@_SS(`t;Fl)-#O*ya1D*5f6P{(~Q)B|keSEBtNt-#m# z8l79u>`D&v3(c3oSt~=r-z`4@67~_4-hIjDnB)y!S>d(C8$?<#`BRHf=@m_LP;Yzl z8yeVsbMpp{uLx|F>m*a|r+*d#AzN%0YwvUpG-x6fGPc`OW!ou5AIH1f0I5h=eAp>o zXr!HD7y34sN|8NktLsi9)Qi=r)Ckjs zmJ-QGH$1yPp%I$IZI=!Yls-5)wrDg{x)bDA>CAAy*e#mypaZkn5;yiz0|6>!Y1mW{csgMl znxdhIcx{>xn6u%V8z83O_T@nAYw9F`ps*nJp29vWipFn)|ws*lYi^BYKL4v#AW-mydaiW}2Bb zVn>Bwjle^XzRj=0LzwRtT9;X43g^WRTJv5j7uEo~QOL_DeTk-dEod4=y*jVGM0y;p zFwLIZ@aU3B)2gfX(PEnPF6HRU)@tIo??slSk`NvX>B1mxPKhDJqb4K1&YzJLF0uns zgPh{6;kljT0Z6yr>)lz}HO(TimIL$OTKu!vD&9_gOAUTK_xzViwd1|k7RJQI`Z6}E z=j6A*=-1zY)QWQ+6?~JD4uzePt0o71qP@vKiXXHOPv}P=_l8#$pl8Ux{ zVHPzNj?gyq5+Z|*8(GCX79N#+ zC1WCPE~-9BV4--p{rA=W^M~JX&VPKe4{K9VHO6eEd2@1m`lyK)Z(B`~{{?fW>d67;mJOi9K({MHu+{Xv)Z- z_BZBsmPGx?jw=Uu(S4GWS~wRz+RHy~LL6ydNBTy&Uu%hNhhaV5%48uA2gLrzXrkh1 zCH(z~dpmOaUU{2#B!A3d){CM64;~yt4XOPvW0;uaCT}w#=_W2R+zS1h0-ly^{zAJC zzH>w$P`;HYej=4C$uqrM2xC2Ih%xrHACGF=M`ZWWx8YYF?_1K* zoodujd2Dp84a#PiF{=tDm!JU0`kH|XLM?G~Fir!x$i6XHhfSjR1nqS?grS4*NgnUy zxo} Date: Fri, 6 May 2022 12:59:41 -0700 Subject: [PATCH 099/244] Check implicit grad acc in GLUE dataset building (#4123) * Check implicit grad acc in GLUE dataset building Signed-off-by: MaximumEntropy * Fix jenkins test for GLUE/XNLI Signed-off-by: MaximumEntropy --- Jenkinsfile | 6 ++--- .../language_modeling/megatron_glue_model.py | 22 +++++++++++++++---- 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index e2c73a9f9d04..5554ff68a2eb 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -2832,7 +2832,7 @@ pipeline { model.data.train_ds.task_name=rte \ model.data.train_ds.global_batch_size=4 \ 
model.data.train_ds.micro_batch_size=2 \ - model.data.validation_ds.global_batch_size=4 \ + model.data.validation_ds.global_batch_size=2 \ model.data.validation_ds.micro_batch_size=2 \ model.data.train_ds.file_path=/home/TestData/nlp/megatron_t5/data/train_ci.tsv \ model.data.validation_ds.task_name=rte \ @@ -2860,9 +2860,9 @@ pipeline { model.pipeline_model_parallel_split_rank=0 \ model.data.train_ds.global_batch_size=4 \ model.data.train_ds.micro_batch_size=2 \ - model.data.validation_ds.global_batch_size=4 \ + model.data.validation_ds.global_batch_size=2 \ model.data.validation_ds.micro_batch_size=2 \ - model.data.test_ds.global_batch_size=4 \ + model.data.test_ds.global_batch_size=2 \ model.data.test_ds.micro_batch_size=2 \ model.data.train_ds.task_name=rte \ model.data.train_ds.file_path=/home/TestData/nlp/megatron_t5/data/train_ci.tsv \ diff --git a/nemo/collections/nlp/models/language_modeling/megatron_glue_model.py b/nemo/collections/nlp/models/language_modeling/megatron_glue_model.py index f089fef32514..cae1ad17afb8 100644 --- a/nemo/collections/nlp/models/language_modeling/megatron_glue_model.py +++ b/nemo/collections/nlp/models/language_modeling/megatron_glue_model.py @@ -21,6 +21,13 @@ from nemo.collections.nlp.models.language_modeling.megatron_finetune_model import MegatronT5FinetuneModel from nemo.utils import logging +try: + from apex.transformer import parallel_state + + HAVE_APEX = True +except (ImportError, ModuleNotFoundError): + HAVE_APEX = False + __all__ = ['MegatronT5GLUEModel'] @@ -30,7 +37,14 @@ class MegatronT5GLUEModel(MegatronT5FinetuneModel): def __init__(self, cfg: DictConfig, trainer: Trainer): super().__init__(cfg, trainer=trainer) - def _build_dataset(self, data_cfg): + def _build_dataset(self, data_cfg, check_implict_grad_acc=False): + if ( + check_implict_grad_acc + and data_cfg.global_batch_size > data_cfg.micro_batch_size * parallel_state.get_data_parallel_world_size() + ): + raise ValueError( + f'You are trying to use "implicit gradient accumulation" of {data_cfg.global_batch_size // (data_cfg.micro_batch_size * parallel_state.get_data_parallel_world_size())} in your validation/test datasets. This is not supported. Please set global_batch_size equal to micro_batch_size * data_parallel_world_size.' + ) if data_cfg.task_name == 'xnli': dataset = TextToTextXNLIDataset( data_cfg.file_path, @@ -52,17 +66,17 @@ def build_train_valid_test_datasets(self, stage): logging.info('Building GLUE/XNLI datasets.') if stage != 'test': # Wrap this in a list since the general finetuning parent class supports multi-validation. - self._validation_ds = [self._build_dataset(self.cfg.data.validation_ds)] + self._validation_ds = [self._build_dataset(self.cfg.data.validation_ds, check_implict_grad_acc=True)] logging.info(f'Length of val dataset: {len(self._validation_ds)}') if stage != 'validate': if hasattr(self.cfg.data, 'test_ds'): # Wrap this in a list since the general finetuning parent class supports multi-validation. 
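# (Editor's sketch, not part of the original diff: the arithmetic behind the
# check_implict_grad_acc guard above, assuming Megatron's usual batch-size
# relation.)
#
#   global_batch_size = micro_batch_size * data_parallel_world_size * grad_acc_steps
#
# so grad_acc_steps = global_batch_size // (micro_batch_size * data_parallel_world_size),
# the same quotient printed in the ValueError. For example, global_batch_size=4 and
# micro_batch_size=2 on a single data-parallel rank give grad_acc_steps=2, i.e.
# implicit gradient accumulation, which these validation/test loops do not support --
# presumably why the Jenkinsfile hunks above lower the eval global_batch_size from 4 to 2.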
- self._test_ds = [self._build_dataset(self.cfg.data.test_ds)] + self._test_ds = [self._build_dataset(self.cfg.data.test_ds, check_implict_grad_acc=True)] logging.info(f'Length of test dataset: {len(self._test_ds)}') if stage == 'validate' or stage == 'test': return - self._train_ds = self._build_dataset(self.cfg.data.train_ds) + self._train_ds = self._build_dataset(self.cfg.data.train_ds, check_implict_grad_acc=False) logging.info(f'Length of train dataset: {len(self._train_ds)}') logging.info(f'Finished building GLUE/XNLI datasets.') From 6fd62548b8fbb6fd63a595ca9197a68aa91a3ca0 Mon Sep 17 00:00:00 2001 From: Evelina <10428420+ekmb@users.noreply.github.com> Date: Mon, 9 May 2022 10:22:19 -0700 Subject: [PATCH 100/244] update the default (#4135) Signed-off-by: ekmb --- .../ru/data/measurements.tsv | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/nemo_text_processing/text_normalization/ru/data/measurements.tsv b/nemo_text_processing/text_normalization/ru/data/measurements.tsv index c5c510f6be9b..a48079dd2268 100644 --- a/nemo_text_processing/text_normalization/ru/data/measurements.tsv +++ b/nemo_text_processing/text_normalization/ru/data/measurements.tsv @@ -84,17 +84,17 @@ га гектарами га гектаре га гектарах -м² квадратный метр -0.1 -м² квадратные метры -0.1 -м² квадратного метра -0.1 -м² квадратных метров -0.1 -м² квадратному метру -0.1 -м² квадратным метрам -0.1 -м² квадратные метры -0.1 -м² квадратным метром -0.1 -м² квадратными метрами -0.1 -м² квадратном метре -0.1 -м² квадратных метрах -0.1 +м² квадратный метр -0.11 +м² квадратные метры -0.11 +м² квадратного метра -0.11 +м² квадратных метров -0.11 +м² квадратному метру -0.11 +м² квадратным метрам -0.11 +м² квадратные метры -0.11 +м² квадратным метром -0.11 +м² квадратными метрами -0.11 +м² квадратном метре -0.11 +м² квадратных метрах -0.11 кв. м. квадратный метр -0.1 кв. м. квадратные метры -0.1 кв. м. 
квадратного метра -0.1 From 54f6bbf0a9310c164e84e14fa0150830b375eff9 Mon Sep 17 00:00:00 2001 From: PeganovAnton Date: Mon, 9 May 2022 22:45:56 +0300 Subject: [PATCH 101/244] Draft: Fix restoring from checkpoint for case when `model.common_dataset_parameters.label_vocab_dir` is provided (#4136) * Fix restoring from checkpoint with label vocab dir Signed-off-by: PeganovAnton * Add tests for various ways to pass label ids to model Signed-off-by: PeganovAnton * Fix typo Signed-off-by: PeganovAnton * Fix typo Signed-off-by: PeganovAnton * Do not create tmp directory Signed-off-by: PeganovAnton * Fix parameter name Signed-off-by: PeganovAnton * finish cherry-pick op Signed-off-by: PeganovAnton * Fix labels errors Signed-off-by: PeganovAnton * Remove duplicate stage Signed-off-by: PeganovAnton * Change target branch Signed-off-by: PeganovAnton --- Jenkinsfile | 99 +++++++++++++++++++ .../punctuation_capitalization_model.py | 18 ++-- 2 files changed, 108 insertions(+), 9 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 5554ff68a2eb..a8b6b8067ba3 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -1555,6 +1555,105 @@ pipeline { } } } + stage('Punctuation & Capitalization, Different ways of passing labels to model') { + when { + anyOf { + branch 'r1.9.0' + changeRequest target: 'r1.9.0' + } + } + failFast true + stages { + stage('Punctuation & Capitalization, Using model.common_datasest_parameters.label_vocab_dir') { + steps { + sh 'cd examples/nlp/token_classification && \ + label_vocab_dir=label_vocab_dir && \ + mkdir -p ${label_vocab_dir} && \ + punct_label_vocab="${label_vocab_dir}/punct_label_vocab.csv" && \ + capit_label_vocab="${label_vocab_dir}/capit_label_vocab.csv" && \ + printf "O\n,\n.\n?\n" > "${punct_label_vocab}" && \ + printf "O\nU\n" > "${capit_label_vocab}" && \ + CUDA_LAUNCH_BLOCKING=1 python punctuation_capitalization_train_evaluate.py \ + model.train_ds.use_tarred_dataset=false \ + model.train_ds.ds_item=/home/TestData/nlp/token_classification_punctuation \ + model.validation_ds.ds_item=/home/TestData/nlp/token_classification_punctuation \ + model.test_ds.ds_item=/home/TestData/nlp/token_classification_punctuation \ + model.language_model.pretrained_model_name=distilbert-base-uncased \ + model.common_dataset_parameters.label_vocab_dir="${label_vocab_dir}" \ + model.class_labels.punct_labels_file="$(basename "${punct_label_vocab}")" \ + model.class_labels.capit_labels_file="$(basename "${capit_label_vocab}")" \ + +model.train_ds.use_cache=false \ + +model.validation_ds.use_cache=false \ + +model.test_ds.use_cache=false \ + trainer.devices=[0,1] \ + trainer.strategy=ddp \ + trainer.max_epochs=1 \ + +exp_manager.explicit_log_dir=/home/TestData/nlp/token_classification_punctuation/output \ + +do_testing=false && \ + CUDA_LAUNCH_BLOCKING=1 python punctuation_capitalization_train_evaluate.py \ + +do_training=false \ + +do_testing=true \ + ~model.train_ds \ + ~model.validation_ds \ + model.test_ds.ds_item=/home/TestData/nlp/token_classification_punctuation \ + pretrained_model=/home/TestData/nlp/token_classification_punctuation/output/checkpoints/Punctuation_and_Capitalization.nemo \ + +model.train_ds.use_cache=false \ + +model.validation_ds.use_cache=false \ + +model.test_ds.use_cache=false \ + trainer.devices=[0,1] \ + trainer.strategy=ddp \ + trainer.max_epochs=1 \ + exp_manager=null && \ + rm -r "${label_vocab_dir}" && \ + rm -rf /home/TestData/nlp/token_classification_punctuation/output/*' + } + } + stage('Punctuation & Capitalization, Using 
model.common_datasest_parameters.{punct,capit}_label_ids') { + steps { + sh 'cd examples/nlp/token_classification && \ + conf_path=/home/TestData/nlp/token_classification_punctuation && \ + conf_name=punctuation_capitalization_config_with_ids && \ + cp conf/punctuation_capitalization_config.yaml "${conf_path}/${conf_name}.yaml" && \ + sed -i $\'s/punct_label_ids: null/punct_label_ids: {O: 0, \\\',\\\': 1, .: 2, \\\'?\\\': 3}/\' \ + "${conf_path}/${conf_name}.yaml" && \ + sed -i $\'s/capit_label_ids: null/capit_label_ids: {O: 0, U: 1}/\' \ + "${conf_path}/${conf_name}.yaml" && \ + CUDA_LAUNCH_BLOCKING=1 python punctuation_capitalization_train_evaluate.py \ + --config-path "${conf_path}" \ + --config-name "${conf_name}" \ + model.train_ds.use_tarred_dataset=false \ + model.train_ds.ds_item=/home/TestData/nlp/token_classification_punctuation \ + model.validation_ds.ds_item=/home/TestData/nlp/token_classification_punctuation \ + model.test_ds.ds_item=/home/TestData/nlp/token_classification_punctuation \ + model.language_model.pretrained_model_name=distilbert-base-uncased \ + +model.train_ds.use_cache=false \ + +model.validation_ds.use_cache=false \ + +model.test_ds.use_cache=false \ + trainer.devices=[0,1] \ + trainer.strategy=ddp \ + trainer.max_epochs=1 \ + +exp_manager.explicit_log_dir=/home/TestData/nlp/token_classification_punctuation/output \ + +do_testing=false && \ + CUDA_LAUNCH_BLOCKING=1 python punctuation_capitalization_train_evaluate.py \ + +do_training=false \ + +do_testing=true \ + ~model.train_ds \ + ~model.validation_ds \ + model.test_ds.ds_item=/home/TestData/nlp/token_classification_punctuation \ + pretrained_model=/home/TestData/nlp/token_classification_punctuation/output/checkpoints/Punctuation_and_Capitalization.nemo \ + +model.train_ds.use_cache=false \ + +model.validation_ds.use_cache=false \ + +model.test_ds.use_cache=false \ + trainer.devices=[0,1] \ + trainer.strategy=ddp \ + trainer.max_epochs=1 \ + exp_manager=null && \ + rm -rf /home/TestData/nlp/token_classification_punctuation/output/* && \ + rm "${conf_path}/${conf_name}.yaml"' + } + } + } + } stage('Punctuation & Capitalization inference') { when { anyOf { diff --git a/nemo/collections/nlp/models/token_classification/punctuation_capitalization_model.py b/nemo/collections/nlp/models/token_classification/punctuation_capitalization_model.py index be7fa62a382f..5a5f6c025eea 100644 --- a/nemo/collections/nlp/models/token_classification/punctuation_capitalization_model.py +++ b/nemo/collections/nlp/models/token_classification/punctuation_capitalization_model.py @@ -638,16 +638,16 @@ def _check_label_config_parameters(self) -> None: ) def _extract_label_vocab_files_from_config(self) -> Tuple[Optional[Path], Optional[Path]]: - if self._cfg.common_dataset_parameters.label_vocab_dir is None: - if self._is_model_being_restored(): - punct_label_vocab_file = self._cfg.class_labels.punct_labels_file - capit_label_vocab_file = self._cfg.class_labels.capit_labels_file - else: - punct_label_vocab_file, capit_label_vocab_file = None, None + if self._is_model_being_restored(): + punct_label_vocab_file = self._cfg.class_labels.punct_labels_file + capit_label_vocab_file = self._cfg.class_labels.capit_labels_file else: - label_vocab_dir = Path(self._cfg.common_dataset_parameters.label_vocab_dir).expanduser() - punct_label_vocab_file = label_vocab_dir / self._cfg.class_labels.punct_labels_file - capit_label_vocab_file = label_vocab_dir / self._cfg.class_labels.capit_labels_file + if self._cfg.common_dataset_parameters.label_vocab_dir 
is None:
+                punct_label_vocab_file, capit_label_vocab_file = None, None
+            else:
+                label_vocab_dir = Path(self._cfg.common_dataset_parameters.label_vocab_dir).expanduser()
+                punct_label_vocab_file = label_vocab_dir / self._cfg.class_labels.punct_labels_file
+                capit_label_vocab_file = label_vocab_dir / self._cfg.class_labels.capit_labels_file
         return punct_label_vocab_file, capit_label_vocab_file
 
     def _set_label_ids(self) -> None:

From 9f5b44383152d0c108c72530425c5289cb2f0f46 Mon Sep 17 00:00:00 2001
From: Yang Zhang
Date: Mon, 9 May 2022 19:48:13 -0700
Subject: [PATCH 102/244] fix typo (#4140)

Signed-off-by: Yang Zhang
---
 tutorials/text_processing/WFST_Tutorial.ipynb | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tutorials/text_processing/WFST_Tutorial.ipynb b/tutorials/text_processing/WFST_Tutorial.ipynb
index 981a2d5cef5c..85c60b66e0a7 100644
--- a/tutorials/text_processing/WFST_Tutorial.ipynb
+++ b/tutorials/text_processing/WFST_Tutorial.ipynb
@@ -6754,7 +6754,7 @@
    "Our last step is to create a universal Verbalizer for all classes. This is very similar to development of `ClassifierFst`, except that the Verbalizer breaks its normalization task into two components:\n",
    "- `VerbalizeFst`, which removes formatting for each token\n",
    "- `VerbalizeFinalFst`, which extends `VerbalizeFst` across all tokens in a string\n",
-   "Why two componenets when `tokenize_and_classify` was one? Because Sparrowhawk performs all the functionality of `VerbalizeFinalFst`, so its inclusion would break deployment. However, without it, your NeMo grammar would be unable to function at base. So we separate the two to allow the best of both world."
+   "Why two components when `tokenize_and_classify` was one? Because Sparrowhawk performs all the functionality of `VerbalizeFinalFst`, so its inclusion would break deployment. However, without it, your NeMo grammar would be unable to function at base. So we separate the two to allow the best of both worlds."
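The split that tutorial cell describes can be illustrated without WFSTs at all. The sketch below is a toy, string-based analogue, not NeMo's pynini grammar: one rule strips class formatting from a single token (the role of `VerbalizeFst`), and a second rule extends it across a whole token stream (the role of `VerbalizeFinalFst`, the step Sparrowhawk performs itself). Every name below is hypothetical.

```python
# Toy, string-based analogue of the two-stage verbalizer described above.
# Real NeMo grammars are pynini WFSTs; all names here are hypothetical.
from typing import Dict, List


def verbalize_token(token: Dict[str, str]) -> str:
    """Per-token rule (the role of VerbalizeFst): strip class formatting
    from one classified token."""
    if token["class"] == "cardinal":
        return token["integer"]
    if token["class"] == "money":
        return f'{token["amount"]} {token["currency"]}'
    return token["name"]  # plain words pass through unchanged


def verbalize_final(tokens: List[Dict[str, str]]) -> str:
    """Stream rule (the role of VerbalizeFinalFst): extend the per-token
    rule across all tokens in a string. Sparrowhawk performs this step
    itself, which is why the two rules are kept separate."""
    return " ".join(verbalize_token(t) for t in tokens)


tokens = [
    {"class": "name", "name": "it"},
    {"class": "name", "name": "costs"},
    {"class": "money", "amount": "three", "currency": "dollars"},
]
print(verbalize_final(tokens))  # -> it costs three dollars
```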
] }, { From 3389242e00283ed4c5d60322774af05b73b8ca38 Mon Sep 17 00:00:00 2001 From: PeganovAnton Date: Tue, 10 May 2022 17:23:31 +0300 Subject: [PATCH 103/244] Fix/punctuation avoid overwritting tmp files (#4144) * Add draft of fixing tmp files overwritting Signed-off-by: PeganovAnton * Remove accidental changes Signed-off-by: PeganovAnton * Remove accidental changes Signed-off-by: PeganovAnton * Use built-in tempfile library Signed-off-by: PeganovAnton * Fix code style Signed-off-by: PeganovAnton --- ...nctuation_capitalization_tarred_dataset.py | 69 +++++++++++-------- 1 file changed, 39 insertions(+), 30 deletions(-) diff --git a/nemo/collections/nlp/data/token_classification/punctuation_capitalization_tarred_dataset.py b/nemo/collections/nlp/data/token_classification/punctuation_capitalization_tarred_dataset.py index 363c1a0a1e9d..2bfcb7969b6e 100644 --- a/nemo/collections/nlp/data/token_classification/punctuation_capitalization_tarred_dataset.py +++ b/nemo/collections/nlp/data/token_classification/punctuation_capitalization_tarred_dataset.py @@ -19,6 +19,7 @@ import pickle import re import shutil +import tempfile from collections import deque from pathlib import Path from typing import Any, Callable, Dict, Iterator, List, Optional, Set, Tuple, Type, Union @@ -160,36 +161,44 @@ def process_fragment( special_tokens=special_tokens, use_fast=use_fast_tokenizer, ) - tmp_text = output_dir / f'tmp_text_{fragment_idx}.txt' - tmp_labels = output_dir / f'tmp_labels_{fragment_idx}.txt' - with text_file.open() as tf, labels_file.open() as lf, tmp_text.open('w') as otf, tmp_labels.open('w') as olf: - tf.seek(text_start_pos) - lf.seek(label_start_pos) - for _ in range(lines_per_dataset_fragment): - text_line = tf.readline() - if not text_line: - break - otf.write(text_line) - olf.write(lf.readline()) - dataset = BertPunctuationCapitalizationDataset( - tmp_text, - tmp_labels, - max_seq_length, - tokenizer, - tokens_in_batch=tokens_in_batch, - pad_label=pad_label, - punct_label_ids=punct_label_ids, - capit_label_ids=capit_label_ids, - n_jobs=0, - use_cache=False, - add_masks_and_segment_ids_to_batch=False, - verbose=False, - tokenization_progress_queue=tokenization_progress_queue, - batch_mark_up_progress_queue=batch_mark_up_progress_queue, - batch_building_progress_queue=batch_building_progress_queue, - ) - tmp_text.unlink() - tmp_labels.unlink() + tmp_text: Optional[str] = None + tmp_labels: Optional[str] = None + try: + otfd, tmp_text = tempfile.mkstemp(suffix='.txt', prefix=f'text_{fragment_idx}_', dir=output_dir, text=True) + olfd, tmp_labels = tempfile.mkstemp(suffix='.txt', prefix=f'labels_{fragment_idx}_', dir=output_dir, text=True) + with text_file.open() as tf, labels_file.open() as lf, os.fdopen(otfd, 'w') as otf, os.fdopen( + olfd, 'w' + ) as olf: + tf.seek(text_start_pos) + lf.seek(label_start_pos) + for _ in range(lines_per_dataset_fragment): + text_line = tf.readline() + if not text_line: + break + otf.write(text_line) + olf.write(lf.readline()) + dataset = BertPunctuationCapitalizationDataset( + tmp_text, + tmp_labels, + max_seq_length, + tokenizer, + tokens_in_batch=tokens_in_batch, + pad_label=pad_label, + punct_label_ids=punct_label_ids, + capit_label_ids=capit_label_ids, + n_jobs=0, + use_cache=False, + add_masks_and_segment_ids_to_batch=False, + verbose=False, + tokenization_progress_queue=tokenization_progress_queue, + batch_mark_up_progress_queue=batch_mark_up_progress_queue, + batch_building_progress_queue=batch_building_progress_queue, + ) + finally: + if tmp_text is not None 
and os.path.exists(tmp_text): + os.remove(tmp_text) + if tmp_labels is not None and os.path.exists(tmp_labels): + os.remove(tmp_labels) dataset.features_pkl.unlink() tar_ctr = 0 current_file_name = output_dir / TAR_FRAGMENT_TMPL_IN_PROGRESS.format(fragment_idx=fragment_idx, file_idx=tar_ctr) From c339c04841a80faa6d4a60332d43caed3b62792c Mon Sep 17 00:00:00 2001 From: Yang Zhang Date: Tue, 10 May 2022 10:26:40 -0700 Subject: [PATCH 104/244] bug_fix_diarization_manifest_creation (#4125) Signed-off-by: Yang Zhang Co-authored-by: Nithin Rao --- scripts/speaker_tasks/pathsfiles_to_manifest.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/speaker_tasks/pathsfiles_to_manifest.py b/scripts/speaker_tasks/pathsfiles_to_manifest.py index 31d6df40fdc4..f9805631231a 100644 --- a/scripts/speaker_tasks/pathsfiles_to_manifest.py +++ b/scripts/speaker_tasks/pathsfiles_to_manifest.py @@ -50,7 +50,7 @@ def get_dict_from_wavlist(pathlist): path_dict = od() pathlist = sorted(pathlist) for line_path in pathlist: - uniq_id = os.path.basename(line_path).split('.')[0] + uniq_id = os.path.splitext(os.path.basename(line_path))[0] path_dict[uniq_id] = line_path return path_dict @@ -58,7 +58,7 @@ def get_dict_from_wavlist(pathlist): def get_dict_from_list(data_pathlist, uniqids): path_dict = {} for line_path in data_pathlist: - uniq_id = os.path.basename(line_path).split('.')[0] + uniq_id = os.path.splitext(os.path.basename(line_path))[0] if uniq_id in uniqids: path_dict[uniq_id] = line_path else: From df33239091a3b68053b239f0d0dc32367ff0fc12 Mon Sep 17 00:00:00 2001 From: Yang Zhang Date: Tue, 10 May 2022 20:52:08 -0700 Subject: [PATCH 105/244] fix doc (#4146) Signed-off-by: Yang Zhang --- tutorials/nlp/02_NLP_Tokenizers.ipynb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tutorials/nlp/02_NLP_Tokenizers.ipynb b/tutorials/nlp/02_NLP_Tokenizers.ipynb index 7199a4c67a14..f2e52a394f8c 100644 --- a/tutorials/nlp/02_NLP_Tokenizers.ipynb +++ b/tutorials/nlp/02_NLP_Tokenizers.ipynb @@ -98,7 +98,7 @@ "\n", "Hugging Face and Megatron tokenizers (which uses Hugging Face underneath) can be automatically instantiated by only `tokenizer_name`, which downloads the corresponding `vocab_file` from the internet. 
\n", "\n", - "For SentencePieceTokenizer, WordTokenizer, and CharTokenizers `tokenizer_model` or/and `vocab_file` can be generated offline in advance using [`scripts/tokenizers/process_asr_text_tokenizer.py`](https://github.com/NVIDIA/NeMo/blob/stable/scripts/process_asr_text_tokenizer.py)\n", + "For SentencePieceTokenizer, WordTokenizer, and CharTokenizers `tokenizer_model` or/and `vocab_file` can be generated offline in advance using [`scripts/tokenizers/process_asr_text_tokenizer.py`](https://github.com/NVIDIA/NeMo/blob/stable/scripts/tokenizers/process_asr_text_tokenizer.py)\n", "\n", "The tokenizers in NeMo are designed to be used interchangeably, especially when\n", "used in combination with a BERT-based model.\n", @@ -381,7 +381,7 @@ "id": "ykwKmREuPQE-" }, "source": [ - "We use the [`scripts/tokenizers/process_asr_text_tokenizer.py`](https://github.com/NVIDIA/NeMo/blob/stable/scripts/process_asr_text_tokenizer.py) script to create a custom tokenizer model with its own vocabulary from an input file" + "We use the [`scripts/tokenizers/process_asr_text_tokenizer.py`](https://github.com/NVIDIA/NeMo/blob/stable/scripts/tokenizers/process_asr_text_tokenizer.py) script to create a custom tokenizer model with its own vocabulary from an input file" ] }, { @@ -585,4 +585,4 @@ }, "nbformat": 4, "nbformat_minor": 1 -} \ No newline at end of file +} From 30db4d4f3fff2cd8304e4b64e5141f87e5292123 Mon Sep 17 00:00:00 2001 From: treacker <36159472+treacker@users.noreply.github.com> Date: Wed, 11 May 2022 19:25:37 +0400 Subject: [PATCH 106/244] Tacotron2 retrain (#4103) * fix yaml Signed-off-by: treacker * Fix for new TTSDataset class Signed-off-by: treacker * added wandb logging Signed-off-by: treacker * added wandb logging Signed-off-by: treacker * fix numpy version Signed-off-by: treacker * fix numpy version Signed-off-by: treacker * inference fix Signed-off-by: treacker * removed old code Signed-off-by: treacker * updated parser logic Signed-off-by: treacker * reverted version update Signed-off-by: treacker * refactored parser logic Signed-off-by: treacker * Updated Jenkinsfile Signed-off-by: treacker * Refactored tutorial for Tacotron2 Signed-off-by: treacker * Made backward compatibility Signed-off-by: treacker * Made backward compatibility Signed-off-by: treacker * Update Jenkinsfile Signed-off-by: treacker * Update tacotron.yaml Signed-off-by: treacker * Refactoring Signed-off-by: treacker * cleaned up TN/ ITN doc (#4119) * cleaned up TN/ ITN doc Signed-off-by: Yang Zhang * fix typo Signed-off-by: Yang Zhang * fix image Signed-off-by: Yang Zhang * fix image Signed-off-by: Yang Zhang Signed-off-by: treacker * Check implicit grad acc in GLUE dataset building (#4123) * Check implicit grad acc in GLUE dataset building Signed-off-by: MaximumEntropy * Fix jenkins test for GLUE/XNLI Signed-off-by: MaximumEntropy Signed-off-by: treacker * Refactoring Signed-off-by: treacker * Refactoring Signed-off-by: treacker * Fixed jenkins Signed-off-by: treacker * Refactoring Signed-off-by: treacker * Refactoring Signed-off-by: treacker * Refactoring Signed-off-by: treacker Co-authored-by: Yang Zhang Co-authored-by: Sandeep Subramanian --- Jenkinsfile | 12 +- examples/tts/conf/tacotron2.yaml | 163 ++++-- nemo/collections/tts/helpers/helpers.py | 68 +++ nemo/collections/tts/models/tacotron2.py | 172 ++++-- tutorials/tts/Tacotron2_Training.ipynb | 680 +++++++++++------------ 5 files changed, 640 insertions(+), 455 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index a8b6b8067ba3..63978e89c011 100644 --- 
a/Jenkinsfile +++ b/Jenkinsfile @@ -2983,7 +2983,6 @@ pipeline { } } parallel { - // TODO(Oktai15): update it in 1.8.0 version stage('Tacotron 2') { steps { sh 'python examples/tts/tacotron2.py \ @@ -2993,13 +2992,18 @@ pipeline { trainer.accelerator="gpu" \ +trainer.limit_train_batches=1 +trainer.limit_val_batches=1 trainer.max_epochs=1 \ trainer.strategy=null \ - model.train_ds.dataloader_params.batch_size=4 \ - model.validation_ds.dataloader_params.batch_size=4 \ model.decoder.decoder_rnn_dim=256 \ model.decoder.attention_rnn_dim=1024 \ model.decoder.prenet_dim=128 \ model.postnet.postnet_n_convolutions=3 \ - ~trainer.check_val_every_n_epoch' + model.train_ds.dataloader_params.batch_size=4 \ + model.train_ds.dataloader_params.num_workers=1 \ + model.validation_ds.dataloader_params.batch_size=4 \ + model.validation_ds.dataloader_params.num_workers=1 \ + ~model.text_normalizer \ + ~model.text_normalizer_call_kwargs \ + ~trainer.check_val_every_n_epoch \ + ' } } stage('WaveGlow') { diff --git a/examples/tts/conf/tacotron2.yaml b/examples/tts/conf/tacotron2.yaml index 58fbf4d750c2..a12b8d5489d6 100644 --- a/examples/tts/conf/tacotron2.yaml +++ b/examples/tts/conf/tacotron2.yaml @@ -1,81 +1,136 @@ -# TODO(Oktai15): update this config in 1.8.0 version +# This config contains the default values for training Tacotron2 model on LJSpeech dataset. +# If you want to train model on other dataset, you can change config values according to your dataset. +# Most dataset-specific arguments are in the head of the config file, see below. name: Tacotron2 -sample_rate: 22050 -# , , will be added by the tacotron2.py script -labels: [' ', '!', '"', "'", '(', ')', ',', '-', '.', ':', ';', '?', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', - 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', ']', - 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', - 'u', 'v', 'w', 'x', 'y', 'z'] -n_fft: 1024 -n_mels: 80 -fmax: 8000 -n_stride: 256 -pad_value: -11.52 + train_dataset: ??? validation_datasets: ??? 
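The `train_dataset: ???` and `validation_datasets: ???` entries above use OmegaConf's mandatory-value marker: the config loads, but resolving such a key fails until a value is supplied, typically as a Hydra command-line override like the ones in the Jenkins stage above. A small sketch of that behavior, assuming only that the `omegaconf` package is installed:

```python
# Sketch of OmegaConf's mandatory-value marker, which backs the `???` entries above.
from omegaconf import OmegaConf
from omegaconf.errors import MissingMandatoryValue

cfg = OmegaConf.create({"train_dataset": "???", "sup_data_path": None})

assert OmegaConf.is_missing(cfg, "train_dataset")
try:
    _ = cfg.train_dataset  # raises until a value is supplied
except MissingMandatoryValue:
    print("train_dataset must be set, e.g. train_dataset=train_manifest.json")

# A Hydra command-line override such as `train_dataset=...` amounts to:
cfg.train_dataset = "train_manifest.json"
print(cfg.train_dataset)  # -> train_manifest.json
```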
+sup_data_path: null
+sup_data_types: null
+
+phoneme_dict_path: "scripts/tts_dataset_files/cmudict-0.7b_nv22.01"
+heteronyms_path: "scripts/tts_dataset_files/heteronyms-030921"
+whitelist_path: "nemo_text_processing/text_normalization/en/data/whitelist_lj_speech.tsv"
+
+
 model:
-  labels: ${labels}
+  pitch_fmin: 65.40639132514966
+  pitch_fmax: 2093.004522404789
+
+  sample_rate: 22050
+  n_mel_channels: 80
+  n_window_size: 1024
+  n_window_stride: 256
+  n_fft: 1024
+  lowfreq: 0
+  highfreq: 8000
+  window: hann
+  pad_value: -11.52
+
+
+  text_normalizer:
+    _target_: nemo_text_processing.text_normalization.normalize.Normalizer
+    lang: en
+    input_case: cased
+    whitelist: ${whitelist_path}
+
+  text_normalizer_call_kwargs:
+    verbose: false
+    punct_pre_process: true
+    punct_post_process: true
+
+  text_tokenizer:
+    _target_: nemo.collections.tts.torch.tts_tokenizers.EnglishPhonemesTokenizer
+    punct: true
+    stresses: true
+    chars: true
+    apostrophe: true
+    pad_with_space: true
+    g2p:
+      _target_: nemo.collections.tts.torch.g2ps.EnglishG2p
+      phoneme_dict: ${phoneme_dict_path}
+      heteronyms: ${heteronyms_path}
+
   train_ds:
     dataset:
-      _target_: "nemo.collections.asr.data.audio_to_text.AudioToCharDataset"
+      _target_: "nemo.collections.tts.torch.data.TTSDataset"
       manifest_filepath: ${train_dataset}
+      sample_rate: ${model.sample_rate}
+      sup_data_path: ${sup_data_path}
+      sup_data_types: ${sup_data_types}
+      n_fft: ${model.n_fft}
+      win_length: ${model.n_window_size}
+      hop_length: ${model.n_window_stride}
+      window: ${model.window}
+      n_mels: ${model.n_mel_channels}
+      lowfreq: ${model.lowfreq}
+      highfreq: ${model.highfreq}
       max_duration: null
       min_duration: 0.1
-      trim: false
-      int_values: false
-      normalize: true
-      sample_rate: ${sample_rate}
-      # bos_id: 66
-      # eos_id: 67
-      # pad_id: 68 These parameters are added automatically in Tacotron2
+      ignore_file: null
+      trim: False
+      pitch_fmin: ${model.pitch_fmin}
+      pitch_fmax: ${model.pitch_fmax}
     dataloader_params:
       drop_last: false
       shuffle: true
       batch_size: 48
       num_workers: 4
-
-
+      pin_memory: false
+
   validation_ds:
     dataset:
-      _target_: "nemo.collections.asr.data.audio_to_text.AudioToCharDataset"
-      manifest_filepath: ${validation_datasets}
+      _target_: "nemo.collections.tts.torch.data.TTSDataset"
+      manifest_filepath: ${validation_datasets}
+      sample_rate: ${model.sample_rate}
+      sup_data_path: ${sup_data_path}
+      sup_data_types: ${sup_data_types}
+      n_fft: ${model.n_fft}
+      win_length: ${model.n_window_size}
+      hop_length: ${model.n_window_stride}
+      window: ${model.window}
+      n_mels: ${model.n_mel_channels}
+      lowfreq: ${model.lowfreq}
+      highfreq: ${model.highfreq}
       max_duration: null
       min_duration: 0.1
-      int_values: false
-      normalize: true
-      sample_rate: ${sample_rate}
-      trim: false
-      # bos_id: 66
-      # eos_id: 67
-      # pad_id: 68 These parameters are added automatically in Tacotron2
+      ignore_file: null
+      trim: False
+      pitch_fmin: ${model.pitch_fmin}
+      pitch_fmax: ${model.pitch_fmax}
     dataloader_params:
       drop_last: false
       shuffle: false
-      batch_size: 48
+      batch_size: 24
       num_workers: 8
+      pin_memory: false
 
   preprocessor:
     _target_: nemo.collections.asr.parts.preprocessing.features.FilterbankFeatures
-    dither: 0.0
-    nfilt: ${n_mels}
-    frame_splicing: 1
-    highfreq: ${fmax}
+    nfilt: ${model.n_mel_channels}
+    highfreq: ${model.highfreq}
     log: true
     log_zero_guard_type: clamp
     log_zero_guard_value: 1e-05
-    lowfreq: 0
-    mag_power: 1.0
-    n_fft: ${n_fft}
-    n_window_size: 1024
-    n_window_stride: ${n_stride}
-    normalize: null
+    lowfreq: ${model.lowfreq}
+    n_fft: ${model.n_fft}
+    n_window_size: ${model.n_window_size}
+    n_window_stride: ${model.n_window_stride}
     pad_to: 16
-    pad_value: ${pad_value}
+    pad_value: ${model.pad_value}
+    sample_rate: ${model.sample_rate}
+    window: ${model.window}
+    normalize: null
     preemph: null
-    sample_rate: ${sample_rate}
-    window: hann
+    dither: 0.0
+    frame_splicing: 1
+    stft_conv: false
+    nb_augmentation_prob : 0
+    mag_power: 1.0
+    exact_pad: true
+    use_grads: false
 
   encoder:
     _target_: nemo.collections.tts.modules.tacotron2.Encoder
@@ -90,7 +145,7 @@ model:
     gate_threshold: 0.5
     max_decoder_steps: 1000
     n_frames_per_step: 1 # currently only 1 is supported
-    n_mel_channels: ${n_mels}
+    n_mel_channels: ${model.n_mel_channels}
     p_attention_dropout: 0.1
     p_decoder_dropout: 0.1
     prenet_dim: 256
@@ -105,7 +160,7 @@ model:
 
   postnet:
     _target_: nemo.collections.tts.modules.tacotron2.Postnet
-    n_mel_channels: ${n_mels}
+    n_mel_channels: ${model.n_mel_channels}
     p_dropout: 0.5
     postnet_embedding_dim: 512
     postnet_kernel_size: 5
@@ -132,11 +187,15 @@ trainer:
   enable_checkpointing: False # Provided by exp_manager
   logger: False # Provided by exp_manager
   gradient_clip_val: 1.0
-  log_every_n_steps: 200
-  check_val_every_n_epoch: 25
+  log_every_n_steps: 60
+  check_val_every_n_epoch: 2
+
 
 exp_manager:
   exp_dir: null
   name: ${name}
-  create_tensorboard_logger: True
-  create_checkpoint_callback: True
+  create_tensorboard_logger: true
+  create_checkpoint_callback: true
+  checkpoint_callback_params:
+    monitor: val_loss
+    mode: min
diff --git a/nemo/collections/tts/helpers/helpers.py b/nemo/collections/tts/helpers/helpers.py
index fea53bf1d8fa..995c6bd59fac 100644
--- a/nemo/collections/tts/helpers/helpers.py
+++ b/nemo/collections/tts/helpers/helpers.py
@@ -56,6 +56,12 @@
 from nemo.utils import logging
 
+HAVE_WANDB = True
+try:
+    import wandb
+except ModuleNotFoundError:
+    HAVE_WANDB = False
+
 try:
     from pytorch_lightning.utilities import rank_zero_only
 except ModuleNotFoundError:
@@ -284,6 +290,7 @@ def tacotron2_log_to_tb_func(
             step,
             dataformats="HWC",
         )
+
     if add_audio:
         filterbank = librosa.filters.mel(sr=sr, n_fft=n_fft, n_mels=n_mels, fmax=fmax)
         log_mel = mel_postnet[0].data.cpu().numpy().T
@@ -299,6 +306,67 @@
         swriter.add_audio(f"audio/{tag}_target", audio / max(np.abs(audio)), step, sample_rate=sr)
 
 
+def tacotron2_log_to_wandb_func(
+    swriter,
+    tensors,
+    step,
+    tag="train",
+    log_images=False,
+    log_images_freq=1,
+    add_audio=True,
+    griffin_lim_mag_scale=1024,
+    griffin_lim_power=1.2,
+    sr=22050,
+    n_fft=1024,
+    n_mels=80,
+    fmax=8000,
+):
+    _, spec_target, mel_postnet, gate, gate_target, alignments = tensors
+    if not HAVE_WANDB:
+        return
+    if log_images and step % log_images_freq == 0:
+        # Use a separate list name so the unpacked `alignments` tensor above
+        # is not shadowed before it is read, and append the spectrogram
+        # images to `specs` rather than to the alignment list.
+        alignment_imgs = []
+        specs = []
+        gates = []
+        alignment_imgs += [
+            wandb.Image(plot_alignment_to_numpy(alignments[0].data.cpu().numpy().T), caption=f"{tag}_alignment",)
+        ]
+        specs += [
+            wandb.Image(plot_spectrogram_to_numpy(spec_target[0].data.cpu().numpy()), caption=f"{tag}_mel_target",),
+            wandb.Image(plot_spectrogram_to_numpy(mel_postnet[0].data.cpu().numpy()), caption=f"{tag}_mel_predicted",),
+        ]
+        gates += [
+            wandb.Image(
+                plot_gate_outputs_to_numpy(
+                    gate_target[0].data.cpu().numpy(), torch.sigmoid(gate[0]).data.cpu().numpy(),
+                ),
+                caption=f"{tag}_gate",
+            )
+        ]
+
+        swriter.log({"specs": specs, "alignments": alignment_imgs, "gates": gates})
+
+    if add_audio:
+        audios = []
+        filterbank = librosa.filters.mel(sr=sr, n_fft=n_fft, n_mels=n_mels, fmax=fmax)
+        log_mel = mel_postnet[0].data.cpu().numpy().T
+        mel = np.exp(log_mel)
+        magnitude = np.dot(mel, filterbank) * 
griffin_lim_mag_scale + audio_pred = griffin_lim(magnitude.T ** griffin_lim_power) + + log_mel = spec_target[0].data.cpu().numpy().T + mel = np.exp(log_mel) + magnitude = np.dot(mel, filterbank) * griffin_lim_mag_scale + audio_true = griffin_lim(magnitude.T ** griffin_lim_power) + + audios += [ + wandb.Audio(audio_true / max(np.abs(audio_true)), caption=f"{tag}_wav_target", sample_rate=sr,), + wandb.Audio(audio_pred / max(np.abs(audio_pred)), caption=f"{tag}_wav_predicted", sample_rate=sr,), + ] + + swriter.log({"audios": audios}) + + def plot_alignment_to_numpy(alignment, info=None): fig, ax = plt.subplots(figsize=(6, 4)) im = ax.imshow(alignment, aspect='auto', origin='lower', interpolation='none') diff --git a/nemo/collections/tts/models/tacotron2.py b/nemo/collections/tts/models/tacotron2.py index baae5f2f43fb..60146741d900 100644 --- a/nemo/collections/tts/models/tacotron2.py +++ b/nemo/collections/tts/models/tacotron2.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import contextlib from dataclasses import dataclass from typing import Any, Dict, List, Optional @@ -19,11 +20,15 @@ from hydra.utils import instantiate from omegaconf import MISSING, DictConfig, OmegaConf, open_dict from omegaconf.errors import ConfigAttributeError -from pytorch_lightning.loggers import LoggerCollection, TensorBoardLogger +from pytorch_lightning.loggers import LoggerCollection, TensorBoardLogger, WandbLogger from torch import nn from nemo.collections.common.parts.preprocessing import parsers -from nemo.collections.tts.helpers.helpers import get_mask_from_lengths, tacotron2_log_to_tb_func +from nemo.collections.tts.helpers.helpers import ( + get_mask_from_lengths, + tacotron2_log_to_tb_func, + tacotron2_log_to_wandb_func, +) from nemo.collections.tts.losses.tacotron2loss import Tacotron2Loss from nemo.collections.tts.models.base import SpectrogramGenerator from nemo.core.classes.common import PretrainedModelInfo, typecheck @@ -36,7 +41,7 @@ SequenceToSequenceAlignmentType, ) from nemo.core.neural_types.neural_type import NeuralType -from nemo.utils import logging +from nemo.utils import logging, model_utils @dataclass @@ -60,8 +65,28 @@ class Tacotron2Model(SpectrogramGenerator): """Tacotron 2 Model that is used to generate mel spectrograms from text""" def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): - if isinstance(cfg, dict): - cfg = OmegaConf.create(cfg) + # Convert to Hydra 1.0 compatible DictConfig + cfg = model_utils.convert_model_config_to_dict_config(cfg) + cfg = model_utils.maybe_update_config_version(cfg) + + # setup normalizer + self.normalizer = None + self.text_normalizer_call = None + self.text_normalizer_call_kwargs = {} + self._setup_normalizer(cfg) + + # setup tokenizer + self.tokenizer = None + if hasattr(cfg, 'text_tokenizer'): + self._setup_tokenizer(cfg) + + self.num_tokens = len(self.tokenizer.tokens) + self.tokenizer_pad = self.tokenizer.pad + self.tokenizer_unk = self.tokenizer.oov + # assert self.tokenizer is not None + else: + self.num_tokens = len(cfg.labels) + 3 + super().__init__(cfg=cfg, trainer=trainer) schema = OmegaConf.structured(Tacotron2Config) @@ -73,17 +98,17 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): # Ensure passed cfg is compliant with schema try: OmegaConf.merge(cfg, schema) - self.pad_value = self._cfg.preprocessor.pad_value + self.pad_value = cfg.preprocessor.pad_value except ConfigAttributeError: - self.pad_value = 
self._cfg.preprocessor.params.pad_value
+            self.pad_value = cfg.preprocessor.params.pad_value
             logging.warning(
                 "Your config is using an old NeMo yaml configuration. Please ensure that the yaml matches the "
                 "current version in the main branch for future compatibility."
             )
 
         self._parser = None
-        self.audio_to_melspec_precessor = instantiate(self._cfg.preprocessor)
-        self.text_embedding = nn.Embedding(len(cfg.labels) + 3, 512)
+        self.audio_to_melspec_precessor = instantiate(cfg.preprocessor)
+        self.text_embedding = nn.Embedding(self.num_tokens, 512)
         self.encoder = instantiate(self._cfg.encoder)
         self.decoder = instantiate(self._cfg.decoder)
         self.postnet = instantiate(self._cfg.postnet)
@@ -94,46 +119,45 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None):
     def parser(self):
         if self._parser is not None:
             return self._parser
-        if self._validation_dl is not None:
-            return self._validation_dl.dataset.manifest_processor.parser
-        if self._test_dl is not None:
-            return self._test_dl.dataset.manifest_processor.parser
-        if self._train_dl is not None:
-            return self._train_dl.dataset.manifest_processor.parser
-
-        # Else construct a parser
-        # Try to get params from validation, test, and then train
-        params = {}
-        try:
-            params = self._cfg.validation_ds.dataset
-        except ConfigAttributeError:
-            pass
-        if params == {}:
-            try:
-                params = self._cfg.test_ds.dataset
-            except ConfigAttributeError:
-                pass
-        if params == {}:
-            try:
-                params = self._cfg.train_ds.dataset
-            except ConfigAttributeError:
-                pass
-
-        name = params.get('parser', None) or 'en'
-        unk_id = params.get('unk_index', None) or -1
-        blank_id = params.get('blank_index', None) or -1
-        do_normalize = params.get('normalize', True)
-        self._parser = parsers.make_parser(
-            labels=self._cfg.labels, name=name, unk_id=unk_id, blank_id=blank_id, do_normalize=do_normalize,
-        )
+
+        ds_class_name = self._cfg.train_ds.dataset._target_.split(".")[-1]
+        if ds_class_name == "TTSDataset":
+            self._parser = None
+        elif hasattr(self._cfg, "labels"):
+            self._parser = parsers.make_parser(
+                labels=self._cfg.labels,
+                name='en',
+                unk_id=-1,
+                blank_id=-1,
+                do_normalize=True,
+                abbreviation_version="fastpitch",
+                make_table=False,
+            )
+        elif ds_class_name == "AudioToCharWithPriorAndPitchDataset":
+            self.parser = self.vocab.encode
+        else:
+            raise ValueError("Wanted to set up the parser, but the model does not have the necessary parameters")
+
         return self._parser
 
-    def parse(self, str_input: str) -> torch.tensor:
-        tokens = self.parser(str_input)
-        # Parser doesn't add bos and eos ids, so maunally add it
-        tokens = [len(self._cfg.labels)] + tokens + [len(self._cfg.labels) + 1]
+    def parse(self, text: str, normalize=True) -> torch.Tensor:
+        if self.training:
+            logging.warning("parse() is meant to be called in eval mode.")
+        if normalize and self.text_normalizer_call is not None:
+            text = self.text_normalizer_call(text, **self.text_normalizer_call_kwargs)
+
+        eval_phon_mode = contextlib.nullcontext()
+        if hasattr(self.tokenizer, "set_phone_prob"):
+            eval_phon_mode = self.tokenizer.set_phone_prob(prob=1.0)
+
+        with eval_phon_mode:
+            if self.tokenizer is not None:
+                tokens = self.tokenizer.encode(text)
+            else:
+                tokens = self.parser(text)
+                # Old parser doesn't add bos and eos ids, so manually add them
+                tokens = [len(self._cfg.labels)] + tokens + [len(self._cfg.labels) + 1]
         tokens_tensor = torch.tensor(tokens).unsqueeze_(0).to(self.device)
-
         return tokens_tensor
 
     @property
@@ -259,18 +283,56 @@ def validation_step(self, batch, batch_idx):
     def validation_epoch_end(self, outputs):
        if self.logger is not None and self.logger.experiment is not None:
-            tb_logger = self.logger.experiment
+            logger = self.logger
             if isinstance(self.logger, LoggerCollection):
                 for logger in self.logger:
                     if isinstance(logger, TensorBoardLogger):
                         break
-            tacotron2_log_to_tb_func(
-                tb_logger, outputs[0].values(), self.global_step, tag="val", log_images=True, add_audio=False,
-            )
+            # Dispatch on the logger type, then pass its experiment object,
+            # so the single-logger case is not checked against the wrong type.
+            if isinstance(logger, TensorBoardLogger):
+                tacotron2_log_to_tb_func(
+                    logger.experiment, outputs[0].values(), self.global_step, tag="val", log_images=True, add_audio=False,
+                )
+            elif isinstance(logger, WandbLogger):
+                tacotron2_log_to_wandb_func(
+                    logger.experiment, outputs[0].values(), self.global_step, tag="val", log_images=True, add_audio=False,
+                )
         avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()  # This reduces across batches, not workers!
         self.log('val_loss', avg_loss)
 
+    def _setup_normalizer(self, cfg):
+        if "text_normalizer" in cfg:
+            normalizer_kwargs = {}
+
+            if "whitelist" in cfg.text_normalizer:
+                normalizer_kwargs["whitelist"] = self.register_artifact(
+                    'text_normalizer.whitelist', cfg.text_normalizer.whitelist
+                )
+
+            self.normalizer = instantiate(cfg.text_normalizer, **normalizer_kwargs)
+            self.text_normalizer_call = self.normalizer.normalize
+            if "text_normalizer_call_kwargs" in cfg:
+                self.text_normalizer_call_kwargs = cfg.text_normalizer_call_kwargs
+
+    def _setup_tokenizer(self, cfg):
+        text_tokenizer_kwargs = {}
+        if "g2p" in cfg.text_tokenizer and cfg.text_tokenizer.g2p is not None:
+            g2p_kwargs = {}
+
+            if "phoneme_dict" in cfg.text_tokenizer.g2p:
+                g2p_kwargs["phoneme_dict"] = self.register_artifact(
+                    'text_tokenizer.g2p.phoneme_dict', cfg.text_tokenizer.g2p.phoneme_dict,
+                )
+
+            if "heteronyms" in cfg.text_tokenizer.g2p:
+                g2p_kwargs["heteronyms"] = self.register_artifact(
+                    'text_tokenizer.g2p.heteronyms', cfg.text_tokenizer.g2p.heteronyms,
+                )
+
+            text_tokenizer_kwargs["g2p"] = instantiate(cfg.text_tokenizer.g2p, **g2p_kwargs)
+
+        self.tokenizer = instantiate(cfg.text_tokenizer, **text_tokenizer_kwargs)
+
     def __setup_dataloader_from_config(self, cfg, shuffle_should_be: bool = True, name: str = "train"):
         if "dataset" not in cfg or not isinstance(cfg.dataset, DictConfig):
             raise ValueError(f"No dataset for {name}")
@@ -289,11 +351,13 @@ def __setup_dataloader_from_config(self, cfg, shuffle_should_be: bool = True, na
         elif not shuffle_should_be and cfg.dataloader_params.shuffle:
             logging.error(f"The {name} dataloader for {self} has shuffle set to True!!!")
 
-        labels = self._cfg.labels
-
         dataset = instantiate(
-            cfg.dataset, labels=labels, bos_id=len(labels), eos_id=len(labels) + 1, pad_id=len(labels) + 2
+            cfg.dataset,
+            text_normalizer=self.normalizer,
+            text_normalizer_call_kwargs=self.text_normalizer_call_kwargs,
+            text_tokenizer=self.tokenizer,
         )
+
         return torch.utils.data.DataLoader(dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params)
 
     def setup_training_data(self, cfg):
diff --git a/tutorials/tts/Tacotron2_Training.ipynb b/tutorials/tts/Tacotron2_Training.ipynb
index 8109b734af60..be021cb78212 100644
--- a/tutorials/tts/Tacotron2_Training.ipynb
+++ b/tutorials/tts/Tacotron2_Training.ipynb
@@ -1,347 +1,337 @@
 {
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "id": "htbJiaJjYQAD"
-   },
-   "source": [
-    "# Tacotron 2 Training\n",
-    "\n",
-    "This notebook is designed to provide a guide on how to train Tacotron2 as part of the TTS pipeline. It contains the following sections\n",
-    "\n",
-    " 1. 
Tacotron2 and NeMo - An introduction to the Tacotron2 model\n", - " 2. LJSpeech - How to train Tacotron2 on LJSpeech\n", - " 3. Custom Datasets - How to collect audio data to train Tacotron2 for difference voices and languages" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "wqPMTEXXYUP4" - }, - "source": [ - "# License\n", - "\n", - "> Copyright 2020 NVIDIA. All Rights Reserved.\n", - "> \n", - "> Licensed under the Apache License, Version 2.0 (the \"License\");\n", - "> you may not use this file except in compliance with the License.\n", - "> You may obtain a copy of the License at\n", - "> \n", - "> http://www.apache.org/licenses/LICENSE-2.0\n", - "> \n", - "> Unless required by applicable law or agreed to in writing, software\n", - "> distributed under the License is distributed on an \"AS IS\" BASIS,\n", - "> WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", - "> See the License for the specific language governing permissions and\n", - "> limitations under the License." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "SUkq9HAvYU7T" - }, - "outputs": [], - "source": [ - "\"\"\"\n", - "You can either run this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.\n", - "Instructions for setting up Colab are as follows:\n", - "1. Open a new Python 3 notebook.\n", - "2. Import this notebook from GitHub (File -> Upload Notebook -> \"GITHUB\" tab -> copy/paste GitHub URL)\n", - "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", - "4. Run this cell to set up dependencies# .\n", - "\"\"\"\n", - "BRANCH = 'r1.9.0'\n", - "# # If you're using Colab and not running locally, uncomment and run this cell.\n", - "# !apt-get install sox libsndfile1 ffmpeg\n", - "# !pip install wget unidecode\n", - "# !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ZivXzmq0YYLj" - }, - "source": [ - "# Tacotron2 and NeMo\n", - "\n", - "Tacotron2 is a neural network that converts text characters into a mel spectrogram. For more details on the model, please refer to Nvidia's [Tacotron2 Model Card](https://ngc.nvidia.com/catalog/models/nvidia:nemo:tts_en_tacotron2), or the original [paper](https://arxiv.org/abs/1712.05884).\n", - "\n", - "Tacotron2 like most NeMo models are defined as a LightningModule, allowing for easy training via PyTorch Lightning, and parameterized by a configuration, currently defined via a yaml file and loading using Hydra.\n", - "\n", - "Let's take a look using NeMo's pretrained model and how to use it to generate spectrograms." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "HEvdSU5WYZbj" - }, - "outputs": [], - "source": [ - "# Load the Tacotron2Model\n", - "from nemo.collections.tts.models import Tacotron2Model\n", - "from nemo.collections.tts.models.base import SpectrogramGenerator\n", - "\n", - "# Let's see what pretrained models are available\n", - "print(Tacotron2Model.list_available_models())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "3W8unatgYbUp" - }, - "outputs": [], - "source": [ - "# We can load the pre-trained model as follows\n", - "model = Tacotron2Model.from_pretrained(\"tts_en_tacotron2\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "xsyBa9tIdHp4" - }, - "outputs": [], - "source": [ - "# Tacotron2 is a SpectrogramGenerator\n", - "assert isinstance(model, SpectrogramGenerator)\n", - "\n", - "# SpectrogramGenerators in NeMo have two helper functions:\n", - "# 1. parse(str_input: str, **kwargs) which takes an English string and produces a token tensor\n", - "# 2. generate_spectrogram(tokens: 'torch.tensor', **kwargs) which takes the token tensor and generates a spectrogram\n", - "# Let's try it out\n", - "tokens = model.parse(str_input = \"Hey, this produces speech!\")\n", - "spectrogram = model.generate_spectrogram(tokens = tokens)\n", - "\n", - "# Now we can visualize the generated spectrogram\n", - "# If we want to generate speech, we have to use a vocoder in conjunction to a spectrogram generator.\n", - "# Refer to the TTS Inference notebook on how to convert spectrograms to speech.\n", - "from matplotlib.pyplot import imshow\n", - "from matplotlib import pyplot as plt\n", - "%matplotlib inline\n", - "imshow(spectrogram.cpu().detach().numpy()[0,...], origin=\"lower\")\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "zZ90eCfdrNIf" - }, - "source": [ - "# Training\n", - "\n", - "Now that we looked at the Tacotron2 model, let's see how to train a Tacotron2 Model\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "7rHG-LERrPRY" - }, - "outputs": [], - "source": [ - "# NeMo's training scripts are stored inside the examples/ folder. Let's grab the tacotron2.py file\n", - "# as well as the tacotron2.yaml file\n", - "!wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/tts/tacotron2.py\n", - "!mkdir -p conf && cd conf && wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/tts/conf/tacotron2.yaml && cd .." 
- ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Upv_LxBIsC51" - }, - "source": [ - "Let's take a look at the tacotron2.py file\n", - "\n", - "```python\n", - "import pytorch_lightning as pl\n", - "\n", - "from nemo.collections.common.callbacks import LogEpochTimeCallback\n", - "from nemo.collections.tts.models import Tacotron2Model\n", - "from nemo.core.config import hydra_runner\n", - "from nemo.utils.exp_manager import exp_manager\n", - "\n", - "\n", - "# hydra_runner is a thin NeMo wrapper around Hydra\n", - "# It looks for a config named tacotron2.yaml inside the conf folder\n", - "# Hydra parses the yaml and returns it as a Omegaconf DictConfig\n", - "@hydra_runner(config_path=\"conf\", config_name=\"tacotron2\")\n", - "def main(cfg):\n", - " # Define the Lightning trainer\n", - " trainer = pl.Trainer(**cfg.trainer)\n", - " # exp_manager is a NeMo construct that helps with logging and checkpointing\n", - " exp_manager(trainer, cfg.get(\"exp_manager\", None))\n", - " # Define the Tacotron 2 model, this will construct the model as well as\n", - " # define the training and validation dataloaders\n", - " model = Tacotron2Model(cfg=cfg.model, trainer=trainer)\n", - " # Let's add a few more callbacks\n", - " lr_logger = pl.callbacks.LearningRateMonitor()\n", - " epoch_time_logger = LogEpochTimeCallback()\n", - " trainer.callbacks.extend([lr_logger, epoch_time_logger])\n", - " # Call lightning trainer's fit() to train the model\n", - " trainer.fit(model)\n", - "\n", - "\n", - "if __name__ == '__main__':\n", - " main() # noqa pylint: disable=no-value-for-parameter\n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "6nM-fZO-s75u" - }, - "source": [ - "Let's take a look at the yaml config\n", - "\n", - "```yaml\n", - "name: &name Tacotron2\n", - "sample_rate: &sr 22050\n", - "# , , will be added by the tacotron2.py script\n", - "labels: &labels [' ', '!', '\"', \"'\", '(', ')', ',', '-', '.', ':', ';', '?', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',\n", - " 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', ']',\n", - " 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',\n", - " 'u', 'v', 'w', 'x', 'y', 'z']\n", - "n_fft: &n_fft 1024\n", - "n_mels: &n_mels 80\n", - "fmax: &fmax null\n", - "n_stride: &n_window_stride 256\n", - "pad_value: &pad_value -11.52\n", - "train_dataset: ???\n", - "validation_datasets: ???\n", - "```\n", - "\n", - "The first part of the yaml defines some parameters used by Tacotron. You can see\n", - "that the sample rate is set to 22050 for LJSpeech. You can also see that this\n", - "model has characters for labels instead of phones. To use phones as input,\n", - "see the GlowTTS yaml and setup for an example.\n", - "\n", - "Looking at the yaml, there is `train_dataset: ???` and `validation_datasets: ???`. The ??? 
indicates to hydra that these values must be passed via the command line or the script will fail.\n", - "\n", - "Looking further down the yaml, we get to the pytorch lightning trainer parameters.\n", - "\n", - "```yaml\n", - "trainer:\n", - " devices: 1 # number of gpus\n", - " accelerator: 'gpu' \n", - " max_epochs: ???\n", - " num_nodes: 1\n", - " accelerator: 'gpu'\n", - " strategy: 'dp'\n", - " accumulate_grad_batches: 1\n", - " enable_checkpointing: False # Provided by exp_manager\n", - " logger: False # Provided by exp_manager\n", - " gradient_clip_val: 1.0\n", - " log_every_n_steps: 200\n", - " check_val_every_n_epoch: 25\n", - "```\n", - "\n", - "These values can be changed either by editing the yaml or through the command line.\n", - "\n", - "Let's grab some simple audio data and test Tacotron2." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "GnEzODcorugt" - }, - "outputs": [], - "source": [ - "!wget https://github.com/NVIDIA/NeMo/releases/download/v0.11.0/test_data.tar.gz \\\n", - " && mkdir -p tests/data \\\n", - " && tar xzf test_data.tar.gz -C tests/data\n", - "\n", - "# Just like ASR, the Tacotron2 require .json files to define the training and validation data.\n", - "!cat tests/data/asr/an4_val.json\n", - "\n", - "# Now that we have some sample data, we can try training Tacotron 2\n", - "# NOTE: The sample data is not enough data to properly train a Tacotron 2. This will not result in a trained Tacotron 2 and is used to illustrate how to train Tacotron 2 model\n", - "!python tacotron2.py \\\n", - "sample_rate=16000 \\\n", - "train_dataset=tests/data/asr/an4_train.json \\\n", - "validation_datasets=tests/data/asr/an4_val.json \\\n", - "trainer.max_epochs=3 \\\n", - "trainer.accelerator=null trainer.check_val_every_n_epoch=1 \\\n", - "+trainer.gpus=1" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "9erGDGZJ1H_p" - }, - "source": [ - "# Training Data\n", - "\n", - "In order to train Tacotron2, it is highly recommended to obtain high quality speech data with the following properties:\n", - " - Sampling rate of 22050Hz or higher\n", - " - Single speaker\n", - " - Speech should contain a variety of speech phonemes\n", - " - Audio split into segments of 1-10 seconds\n", - " - Audio segments should not have silence at the beginning and end\n", - " - Audio segments should not contain long silences inside\n", - "\n", - "After obtaining the speech data and splitting into training, validation, and test sections, it is required to construct .json files to tell NeMo where to find these audio files.\n", - "\n", - "The .json files should adhere to the format required by the `nemo.collections.asr.data.audio_to_text.AudioToCharDataset` class. 
For example, here is a sample .json file\n", - "\n", - "```json\n", - "{\"audio_filepath\": \"/path/to/audio1.wav\", \"text\": \"the transcription\", \"duration\": 0.82}\n", - "{\"audio_filepath\": \"/path/to/audio2.wav\", \"text\": \"the other transcription\", \"duration\": 2.1}\n", - "...\n", - "```\n", - "Please note that the duration is in seconds.\n", - "\n", - "Lastly, update the labels inside the Tacotron 2 yaml config if your data contains a different set of characters.\n", - "\n", - "Then you are ready to run your training script:\n", - "```bash\n", - "python tacotron2.py train_dataset=YOUR_TRAIN.json validation_datasets=YOUR_VAL.json trainer.devices=-1\n", - "```" - ] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "collapsed_sections": [], - "name": "Taco2.ipynb", - "provenance": [] - }, - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 1 + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "htbJiaJjYQAD" + }, + "source": [ + "# Tacotron 2 Training\n", + "\n", + "This notebook is designed to provide a guide on how to train Tacotron2 as part of the TTS pipeline. It contains the following sections\n", + "\n", + " 1. Tacotron2 and NeMo - An introduction to the Tacotron2 model\n", + " 2. LJSpeech - How to train Tacotron2 on LJSpeech\n", + " 3. Custom Datasets - How to collect audio data to train Tacotron2 for difference voices and languages" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "wqPMTEXXYUP4" + }, + "source": [ + "# License\n", + "\n", + "> Copyright 2020 NVIDIA. All Rights Reserved.\n", + "> \n", + "> Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "> you may not use this file except in compliance with the License.\n", + "> You may obtain a copy of the License at\n", + "> \n", + "> http://www.apache.org/licenses/LICENSE-2.0\n", + "> \n", + "> Unless required by applicable law or agreed to in writing, software\n", + "> distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "> WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "> See the License for the specific language governing permissions and\n", + "> limitations under the License." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "SUkq9HAvYU7T" + }, + "outputs": [], + "source": [ + "\"\"\"\n", + "You can either run this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.\n", + "Instructions for setting up Colab are as follows:\n", + "1. Open a new Python 3 notebook.\n", + "2. Import this notebook from GitHub (File -> Upload Notebook -> \"GITHUB\" tab -> copy/paste GitHub URL)\n", + "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", + "4. 
Run this cell to set up dependencies.\n",
+    "\"\"\"\n",
+    "BRANCH = 'main'\n",
+    "# # If you're using Colab and not running locally, uncomment and run this cell.\n",
+    "# !apt-get install sox libsndfile1 ffmpeg\n",
+    "# !pip install wget unidecode\n",
+    "# !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "ZivXzmq0YYLj"
+   },
+   "source": [
+    "# Tacotron2 and NeMo\n",
+    "\n",
+    "Tacotron2 is a neural network that converts text characters into a mel spectrogram. For more details on the model, please refer to Nvidia's [Tacotron2 Model Card](https://ngc.nvidia.com/catalog/models/nvidia:nemo:tts_en_tacotron2), or the original [paper](https://arxiv.org/abs/1712.05884).\n",
+    "\n",
+    "Tacotron2, like most NeMo models, is defined as a LightningModule, allowing for easy training via PyTorch Lightning, and parameterized by a configuration, currently defined via a yaml file and loaded using Hydra.\n",
+    "\n",
+    "Let's take a look using NeMo's pretrained model and how to use it to generate spectrograms."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "HEvdSU5WYZbj"
+   },
+   "outputs": [],
+   "source": [
+    "# Load the Tacotron2Model\n",
+    "from nemo.collections.tts.models import Tacotron2Model\n",
+    "from nemo.collections.tts.models.base import SpectrogramGenerator\n",
+    "\n",
+    "# Let's see what pretrained models are available\n",
+    "print(Tacotron2Model.list_available_models())"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "3W8unatgYbUp"
+   },
+   "outputs": [],
+   "source": [
+    "# We can load the pre-trained model as follows\n",
+    "model = Tacotron2Model.from_pretrained(\"tts_en_tacotron2\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "xsyBa9tIdHp4"
+   },
+   "outputs": [],
+   "source": [
+    "# Tacotron2 is a SpectrogramGenerator\n",
+    "assert isinstance(model, SpectrogramGenerator)\n",
+    "\n",
+    "# SpectrogramGenerators in NeMo have two helper functions:\n",
+    "# 1. parse(self, text: str, normalize=True) which takes an English string and produces a token tensor\n",
+    "# 2. generate_spectrogram(self, *, tokens) which takes the token tensor and generates a spectrogram\n",
+    "# Let's try it out\n",
+    "tokens = model.parse(text = \"Hey, this produces speech!\")\n",
+    "spectrogram = model.generate_spectrogram(tokens = tokens)\n",
+    "\n",
+    "# Now we can visualize the generated spectrogram\n",
+    "# If we want to generate speech, we have to use a vocoder in conjunction with a spectrogram generator.\n",
+    "# Refer to the TTS Inference notebook on how to convert spectrograms to speech.\n",
+    "from matplotlib.pyplot import imshow\n",
+    "from matplotlib import pyplot as plt\n",
+    "%matplotlib inline\n",
+    "imshow(spectrogram.cpu().detach().numpy()[0,...], origin=\"lower\")\n",
+    "plt.show()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "zZ90eCfdrNIf"
+   },
+   "source": [
+    "# Training\n",
+    "\n",
+    "Now that we looked at the Tacotron2 model, let's see how to train a Tacotron2 Model\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "7rHG-LERrPRY"
+   },
+   "outputs": [],
+   "source": [
+    "# NeMo's training scripts are stored inside the examples/ folder. Let's grab the tacotron2.py file\n",
Let's grab the tacotron2.py file\n", + "# as well as the tacotron2.yaml file\n", + "!wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/tts/tacotron2.py\n", + "!mkdir -p conf && cd conf && wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/tts/conf/tacotron2.yaml && cd .." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Upv_LxBIsC51" + }, + "source": [ + "Let's take a look at the tacotron2.py file\n", + "\n", + "```python\n", + "import pytorch_lightning as pl\n", + "\n", + "from nemo.collections.common.callbacks import LogEpochTimeCallback\n", + "from nemo.collections.tts.models import Tacotron2Model\n", + "from nemo.core.config import hydra_runner\n", + "from nemo.utils.exp_manager import exp_manager\n", + "\n", + "\n", + "# hydra_runner is a thin NeMo wrapper around Hydra\n", + "# It looks for a config named tacotron2.yaml inside the conf folder\n", + "# Hydra parses the yaml and returns it as a Omegaconf DictConfig\n", + "@hydra_runner(config_path=\"conf\", config_name=\"tacotron2\")\n", + "def main(cfg):\n", + " # Define the Lightning trainer\n", + " trainer = pl.Trainer(**cfg.trainer)\n", + " # exp_manager is a NeMo construct that helps with logging and checkpointing\n", + " exp_manager(trainer, cfg.get(\"exp_manager\", None))\n", + " # Define the Tacotron 2 model, this will construct the model as well as\n", + " # define the training and validation dataloaders\n", + " model = Tacotron2Model(cfg=cfg.model, trainer=trainer)\n", + " # Let's add a few more callbacks\n", + " lr_logger = pl.callbacks.LearningRateMonitor()\n", + " epoch_time_logger = LogEpochTimeCallback()\n", + " trainer.callbacks.extend([lr_logger, epoch_time_logger])\n", + " # Call lightning trainer's fit() to train the model\n", + " trainer.fit(model)\n", + "\n", + "\n", + "if __name__ == '__main__':\n", + " main() # noqa pylint: disable=no-value-for-parameter\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6nM-fZO-s75u" + }, + "source": [ + "Let's take a look at the yaml config\n", + "\n", + "```yaml\n", + "name: &name Tacotron2\n", + "\n", + "train_dataset: ???\n", + "validation_datasets: ???\n", + "sup_data_path: null\n", + "sup_data_types: null\n", + "\n", + "phoneme_dict_path: \"scripts/tts_dataset_files/cmudict-0.7b_nv22.01\"\n", + "heteronyms_path: \"scripts/tts_dataset_files/heteronyms-030921\"\n", + "whitelist_path: \"nemo_text_processing/text_normalization/en/data/whitelist_lj_speech.tsv\"\n", + "```\n", + "\n", + "The first part of the yaml defines dataset parameters used by Tacotron. Then in the head of 'model' section there are processing - related parameters. You can see\n", + "that the sample rate is set to 22050 for LJSpeech. \n", + "\n", + "Looking at the yaml, there is `train_dataset: ???` and `validation_datasets: ???`. The ??? 
indicates to hydra that these values must be passed via the command line or the script will fail.\n", + "\n", + "Looking further down the yaml, we get to the pytorch lightning trainer parameters.\n", + "\n", + "```yaml\n", + "trainer:\n", + " devices: 1 # number of gpus\n", + " accelerator: 'gpu' \n", + " max_epochs: ???\n", + " num_nodes: 1\n", + " accelerator: 'gpu'\n", + " strategy: 'ddp'\n", + " accumulate_grad_batches: 1\n", + " enable_checkpointing: False # Provided by exp_manager\n", + " logger: False # Provided by exp_manager\n", + " gradient_clip_val: 1.0\n", + " log_every_n_steps: 200\n", + " check_val_every_n_epoch: 25\n", + "```\n", + "\n", + "These values can be changed either by editing the yaml or through the command line.\n", + "\n", + "Let's grab some simple audio data and test Tacotron2." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "GnEzODcorugt" + }, + "outputs": [], + "source": [ + "!wget https://github.com/NVIDIA/NeMo/releases/download/v0.11.0/test_data.tar.gz && mkdir -p tests/data && tar xzf test_data.tar.gz -C tests/data\n", + "\n", + "# Just like ASR, the Tacotron2 require .json files to define the training and validation data.\n", + "!cat tests/data/asr/an4_val.json\n", + "\n", + "# Now that we have some sample data, we can try training Tacotron 2\n", + "# NOTE: The sample data is not enough data to properly train a Tacotron 2. This will not result in a trained Tacotron 2 and is used to illustrate how to train Tacotron 2 model\n", + "!python tacotron2.py sample_rate=16000 train_dataset=tests/data/asr/an4_train.json validation_datasets=tests/data/asr/an4_val.json trainer.max_epochs=3 trainer.accelerator=null trainer.check_val_every_n_epoch=1" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "9erGDGZJ1H_p" + }, + "source": [ + "# Training Data\n", + "\n", + "In order to train Tacotron2, it is highly recommended to obtain high quality speech data with the following properties:\n", + " - Sampling rate of 22050Hz or higher\n", + " - Single speaker\n", + " - Speech should contain a variety of speech phonemes\n", + " - Audio split into segments of 1-10 seconds\n", + " - Audio segments should not have silence at the beginning and end\n", + " - Audio segments should not contain long silences inside\n", + "\n", + "After obtaining the speech data and splitting into training, validation, and test sections, it is required to construct .json files to tell NeMo where to find these audio files.\n", + "\n", + "The .json files should adhere to the format required by the `nemo.collections.tts.torch.data.TTSDataset` class. 
For example, here is a sample .json file\n", + "\n", + "```json\n", + "{\"audio_filepath\": \"/path/to/audio1.wav\", \"text\": \"the transcription\", \"duration\": 0.82}\n", + "{\"audio_filepath\": \"/path/to/audio2.wav\", \"text\": \"the other transcription\", \"duration\": 2.1}\n", + "...\n", + "```\n", + "Please note that the duration is in seconds.\n", + "\n", + "\n", + "Then you are ready to run your training script:\n", + "```bash\n", + "python tacotron2.py train_dataset=YOUR_TRAIN.json validation_datasets=YOUR_VAL.json trainer.devices=-1\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "name": "Taco2.ipynb", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.6" + } + }, + "nbformat": 4, + "nbformat_minor": 1 } \ No newline at end of file From 1f3788e3c09fe96172490a0d9fc6782cc86ffd57 Mon Sep 17 00:00:00 2001 From: Nithin Rao Date: Wed, 11 May 2022 10:28:55 -0700 Subject: [PATCH 107/244] Multiprocess improvements (#4127) * initial commit Signed-off-by: nithinraok * start fix Signed-off-by: nithinraok * improve multiprocessing speed while creating speaker dataset Signed-off-by: nithinraok * updated scp to filelist Signed-off-by: nithinraok --- nemo/collections/asr/models/label_models.py | 1 - ...to_manifest.py => filelist_to_manifest.py} | 133 ++++++++---------- 2 files changed, 59 insertions(+), 75 deletions(-) rename scripts/speaker_tasks/{scp_to_manifest.py => filelist_to_manifest.py} (67%) diff --git a/nemo/collections/asr/models/label_models.py b/nemo/collections/asr/models/label_models.py index 83f0572c6afd..9c3a468bbdff 100644 --- a/nemo/collections/asr/models/label_models.py +++ b/nemo/collections/asr/models/label_models.py @@ -251,7 +251,6 @@ def output_types(self) -> Optional[Dict[str, NeuralType]]: "embs": NeuralType(('B', 'D'), AcousticEncodedRepresentation()), } - @typecheck() def forward_for_export(self, processed_signal, processed_signal_len): encoded, length = self.encoder(audio_signal=processed_signal, length=processed_signal_len) logits, embs = self.decoder(encoder_output=encoded, length=length) diff --git a/scripts/speaker_tasks/scp_to_manifest.py b/scripts/speaker_tasks/filelist_to_manifest.py similarity index 67% rename from scripts/speaker_tasks/scp_to_manifest.py rename to scripts/speaker_tasks/filelist_to_manifest.py index c146b13e1742..18ad6579a551 100644 --- a/scripts/speaker_tasks/scp_to_manifest.py +++ b/scripts/speaker_tasks/filelist_to_manifest.py @@ -14,7 +14,6 @@ import argparse import json -import multiprocessing import os import random @@ -28,26 +27,30 @@ random.seed(42) """ -This scipt converts a scp file where each line contains - -to a manifest json file. +This scipt converts a filelist file where each line contains + to a manifest json file. +Optionally post processes the manifest file to create dev and train split for speaker embedding +training, also optionally chunk an audio file in to segments of random DURATIONS and create those +wav files in CWD. + +While creating chunks, if audio is not sampled at 16Khz, it resamples to 16Khz and write the wav file. 
Args: ---scp: scp file name ---manifest(optional): if you already have manifest file, but would like to process it for creating chunks and splitting then use manifest ignoring scp ---id: index of speaker label in filename present in scp file that is separated by '/' +--filelist: path to file containing list of audio files +--manifest(optional): if you already have manifest file, but would like to process it for creating chunks and splitting then use manifest ignoring filelist +--id: index of speaker label in filename present in filelist file that is separated by '/' --out: output manifest file name ---split: True / False if you would want to split the manifest file for training purposes +--split: if you would want to split the manifest file for training purposes you may not need this for test set. output file names is _.json Defaults to False ---create_chunks: bool if you would want to chunk each manifest line to chunks of 3 sec or less +--create_chunks:if you would want to chunk each manifest line to chunks of 4 sec or less you may not need this for test set, Defaults to False ---write_chunks: writes chunked files based on offset to {current working directory}/chunks/{label}/{original_file_name}_{offset}_{duration}.wav --min_spkrs_count: min number of samples per speaker to consider and ignore otherwise """ -DURATIONS = [1.5, 2, 3] +DURATIONS = sorted([1, 2, 3, 4], reverse=True) MIN_ENERGY = 0.01 -CWD = './' +CWD = os.getcwd() +SAMPLE_RATE = 16000 def filter_manifest_line(manifest_line): @@ -55,22 +58,45 @@ def filter_manifest_line(manifest_line): audio_path = manifest_line['audio_filepath'] start = manifest_line.get('offset', 0) dur = manifest_line['duration'] + label = manifest_line['label'] + endname = os.path.splitext(audio_path.split(label, 1)[-1])[0] + to_path = os.path.join(CWD, 'chunks', label) + to_path = os.path.join(to_path, endname[1:]) + os.makedirs(os.path.dirname(to_path), exist_ok=True) if dur >= min(DURATIONS): - signal, sr = l.load(audio_path, sr=None) - remaining_dur = dur - temp_dur = random.choice(DURATIONS) - remaining_dur = remaining_dur - temp_dur - while remaining_dur >= 0: + signal, sr = l.load(audio_path, sr=SAMPLE_RATE) + remaining_dur = dur - start + + segments = DURATIONS.copy() + mode = int(remaining_dur // sum(DURATIONS)) + rem = remaining_dur % sum(DURATIONS) + segments = mode * segments + + for val in DURATIONS: + if rem >= val: + segments.append(val) + rem = rem - val + + for temp_dur in segments: segment_audio = signal[int(start * sr) : int(start * sr + temp_dur * sr)] if l.feature.rms(y=segment_audio).mean() > MIN_ENERGY: + final_string = '_' + str(start) + '_' + str(temp_dur) + final_string = final_string.replace('.', '-') + to_file = to_path + final_string + '.wav' + + c_start = int(float(start * sr)) + c_end = c_start + int(float(temp_dur * sr)) + chunk = signal[c_start:c_end] + sf.write(to_file, chunk, sr) + meta = manifest_line.copy() - meta['offset'] = start + meta['audio_filepath'] = to_file + meta['offset'] = 0 meta['duration'] = temp_dur split_manifest.append(meta) + start = start + temp_dur - temp_dur = random.choice(DURATIONS) - remaining_dur = remaining_dur - temp_dur return split_manifest @@ -106,9 +132,9 @@ def write_file(name, lines, idx): print("wrote", name) -def read_file(scp_file, id=-1): +def read_file(filelist, id=-1): json_lines = [] - with open(scp_file, 'r') as fo: + with open(filelist, 'r') as fo: lines = fo.readlines() lines = sorted(lines) for line in lines: @@ -146,41 +172,21 @@ def get_labels(lines): return labels -def 
write_audio_file(line): - filename = line['audio_filepath'] - label = line['label'] - offset = line['offset'] - duration = line['duration'] - basename = os.path.basename(filename).replace('.wav', '') - to_path = os.path.join(CWD, 'chunks', label) - os.makedirs(to_path, exist_ok=True) - to_path = os.path.join(to_path, basename) - final_string = '_' + str(offset) + '_' + str(duration) - final_string = final_string.replace('.', '-') - samples, sr = sf.read(filename) - start = int(float(offset * sr)) - end = start + int(float(duration * sr)) - chunk = samples[start:end] - to_file = to_path + final_string + '.wav' - sf.write(to_file, chunk, sr) - - line['offset'] = 0 - line['audio_filepath'] = to_file - return line - - -def main(scp, manifest, id, out, split=False, create_chunks=False, write_chunks=False, min_count=10, workers=4): +def main(filelist, manifest, id, out, split=False, create_chunks=False, min_count=10): if os.path.exists(out): os.remove(out) - if scp: - lines = read_file(scp_file=scp, id=id) + if filelist: + lines = read_file(filelist=filelist, id=id) + lines = process_map(get_duration, lines, chunksize=100) + out_file = os.path.splitext(filelist)[0] + '_manifest.json' + write_file(out_file, lines, range(len(lines))) else: lines = read_manifest(manifest) lines = process_map(get_duration, lines, chunksize=100) if create_chunks: - print("creating chunk") + print(f"creating and writing chunks to {CWD}") lines = process_map(filter_manifest_line, lines, chunksize=100) temp = [] for line in lines: @@ -188,10 +194,6 @@ def main(scp, manifest, id, out, split=False, create_chunks=False, write_chunks= del lines lines = temp - if create_chunks and write_chunks: - print("writing chunks created before as new wav files") - lines = process_map(write_audio_file, lines, chunksize=100) - speakers = [x['label'] for x in lines] if min_count: @@ -213,11 +215,11 @@ def main(scp, manifest, id, out, split=False, create_chunks=False, write_chunks= if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument("--scp", help="scp file name", type=str, required=False, default=None) + parser.add_argument("--filelist", help="path to filelist file", type=str, required=False, default=None) parser.add_argument("--manifest", help="manifest file name", type=str, required=False, default=None) parser.add_argument( "--id", - help="field num seperated by '/' to be considered as speaker label from scp file, can be ignored if manifest file is already provided with labels", + help="field num seperated by '/' to be considered as speaker label from filelist file, can be ignored if manifest file is already provided with labels", type=int, required=False, default=None, @@ -231,13 +233,7 @@ def main(scp, manifest, id, out, split=False, create_chunks=False, write_chunks= ) parser.add_argument( "--create_chunks", - help="bool if you would want to chunk each manifest line to chunks of 3 sec or less", - required=False, - action='store_true', - ) - parser.add_argument( - "--write_chunks", - help="bool if you would want to write the chunks created with --create_chunk to CWD ", + help="bool if you would want to chunk each manifest line to chunks of 4 sec or less", required=False, action='store_true', ) @@ -247,20 +243,9 @@ def main(scp, manifest, id, out, split=False, create_chunks=False, write_chunks= type=int, help="min number of samples per speaker to consider and ignore otherwise", ) - parser.add_argument( - "--num_workers", default=multiprocessing.cpu_count(), type=int, help="Workers to process dataset." 
-    )
 
     args = parser.parse_args()
 
     main(
-        args.scp,
-        args.manifest,
-        args.id,
-        args.out,
-        args.split,
-        args.create_chunks,
-        args.write_chunks,
-        args.min_spkrs_count,
-        args.num_workers,
+        args.filelist, args.manifest, args.id, args.out, args.split, args.create_chunks, args.min_spkrs_count,
     )

From 46162c6aba967a78914f0cc43d8abb9267d8e93c Mon Sep 17 00:00:00 2001
From: Jocelyn
Date: Wed, 11 May 2022 13:42:13 -0700
Subject: [PATCH 108/244] WaveGlow input type fixes (#4151)

Signed-off-by: Jocelyn Huang
---
 nemo/collections/tts/models/waveglow.py  | 4 +---
 nemo/collections/tts/modules/waveglow.py | 5 +++--
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/nemo/collections/tts/models/waveglow.py b/nemo/collections/tts/models/waveglow.py
index a1462781ddc0..a1a522a44c0c 100644
--- a/nemo/collections/tts/models/waveglow.py
+++ b/nemo/collections/tts/models/waveglow.py
@@ -88,9 +88,7 @@ def convert_spectrogram_to_audio(
     ) -> torch.Tensor:
         with self.nemo_infer():
             self.waveglow.remove_weightnorm()
-            audio = self.waveglow(
-                spec=spec.to(self.waveglow.upsample.weight.dtype), run_inverse=True, audio=None, sigma=sigma
-            )
+            audio = self.waveglow(spec=spec.to(self.waveglow.upsample.weight.dtype), sigma=sigma)
 
         if denoise:
             audio = self.denoise(audio=audio, strength=denoiser_strength)

diff --git a/nemo/collections/tts/modules/waveglow.py b/nemo/collections/tts/modules/waveglow.py
index daa5405298db..e270cb76f8d3 100644
--- a/nemo/collections/tts/modules/waveglow.py
+++ b/nemo/collections/tts/modules/waveglow.py
@@ -134,12 +134,13 @@ def input_types(self):
         if self.mode == OperationMode.infer:
             return {
                 "spec": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
-                "z": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
+                "z": NeuralType(('B', 'D', 'T'), MelSpectrogramType(), optional=True),
+                "sigma": NeuralType(optional=True),
             }
         else:
             return {
                 "spec": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
-                "z": NeuralType(('B', 'D', 'T'), MelSpectrogramType()),
+                "z": NeuralType(('B', 'D', 'T'), MelSpectrogramType(), optional=True),
                 "audio": NeuralType(('B', 'T'), AudioSignal(), optional=True),
                 "run_inverse": NeuralType(elements_type=IntType(), optional=True),
                 "sigma": NeuralType(optional=True),

From b34609f63415dfee8e0a8de549f9736d3b53c886 Mon Sep 17 00:00:00 2001
From: fayejf <36722593+fayejf@users.noreply.github.com>
Date: Thu, 12 May 2022 10:32:28 -0700
Subject: [PATCH 109/244] notebooks' link, typo and import fix (#4158)

* redo missing pr 4007

Signed-off-by: fayejf

* remove extremely unreliable links

Signed-off-by: fayejf
---
 .../asr/Offline_ASR_with_VAD_for_CTC_models.ipynb  |  8 ++++----
 tutorials/asr/Speech_Commands.ipynb                | 10 +++-------
 tutorials/asr/Voice_Activity_Detection.ipynb       |  6 +++---
 .../Speaker_Identification_Verification.ipynb      | 10 +++-------
 4 files changed, 13 insertions(+), 21 deletions(-)

diff --git a/tutorials/asr/Offline_ASR_with_VAD_for_CTC_models.ipynb b/tutorials/asr/Offline_ASR_with_VAD_for_CTC_models.ipynb
index af6c8ffc477e..61675ec37e87 100644
--- a/tutorials/asr/Offline_ASR_with_VAD_for_CTC_models.ipynb
+++ b/tutorials/asr/Offline_ASR_with_VAD_for_CTC_models.ipynb
@@ -43,7 +43,7 @@
     "import torch\n",
     "import os\n",
     "from nemo.collections.asr.metrics.wer import word_error_rate\n",
-    "from nemo.collections.asr.parts.utils.vad_utils import stitch_segmented_asr_output, contruct_manfiest_eval"
+    "from nemo.collections.asr.parts.utils.vad_utils import stitch_segmented_asr_output, construct_manifest_eval"
   ]
  },
 {
@@ -320,7 +320,7 @@
   "cell_type": "markdown",
   "metadata": {},
"source": [ - "If we have ground-truth 'text' in input_manifest, we can evaluate our performance of stitched output. Let's align the 'text' in input manifest and 'pred_text' in stitched segmented asr output first, since some samples from input_manfiest might be pure noise and have been removed in VAD output and excluded for ASR inference. " + "If we have ground-truth 'text' in input_manifest, we can evaluate our performance of stitched output. Let's align the 'text' in input manifest and 'pred_text' in stitched segmented asr output first, since some samples from input_manifest might be pure noise and have been removed in VAD output and excluded for ASR inference. " ] }, { @@ -329,7 +329,7 @@ "metadata": {}, "outputs": [], "source": [ - "aligned_vad_asr_output_manifest = contruct_manfiest_eval(input_manifest, stitched_output_manifest)" + "aligned_vad_asr_output_manifest = construct_manifest_eval(input_manifest, stitched_output_manifest)" ] }, { @@ -386,4 +386,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/tutorials/asr/Speech_Commands.ipynb b/tutorials/asr/Speech_Commands.ipynb index fc40552aca1c..13c37c33455a 100644 --- a/tutorials/asr/Speech_Commands.ipynb +++ b/tutorials/asr/Speech_Commands.ipynb @@ -643,17 +643,13 @@ "\n", "We can dramatically improve the time taken to train this model by using Multi GPU training along with Mixed Precision.\n", "\n", - "For multi-GPU training, take a look at [the PyTorch Lightning Multi-GPU training section](https://pytorch-lightning.readthedocs.io/en/latest/advanced/multi_gpu.html)\n", - "\n", - "For mixed-precision training, take a look at [the PyTorch Lightning Mixed-Precision training section](https://pytorch-lightning.readthedocs.io/en/latest/guides/speed.html#mixed-precision-16-bit-training)\n", - "\n", "```python\n", - "# Mixed precision:\n", - "trainer = Trainer(amp_level='O1', precision=16)\n", - "\n", "# Trainer with a distributed backend:\n", "trainer = Trainer(devices=2, num_nodes=2, accelerator='gpu', strategy='dp')\n", "\n", + "# Mixed precision:\n", + "trainer = Trainer(amp_level='O1', precision=16)\n", + "\n", "# Of course, you can combine these flags as well.\n", "```" ] diff --git a/tutorials/asr/Voice_Activity_Detection.ipynb b/tutorials/asr/Voice_Activity_Detection.ipynb index 19a687e0b217..3c7b848c6d5e 100644 --- a/tutorials/asr/Voice_Activity_Detection.ipynb +++ b/tutorials/asr/Voice_Activity_Detection.ipynb @@ -657,12 +657,12 @@ "We can dramatically improve the time taken to train this model by using Multi GPU training along with Mixed Precision.\n", "\n", "```python\n", - "# Mixed precision:\n", - "trainer = Trainer(amp_level='O1', precision=16)\n", - "\n", "# Trainer with a distributed backend:\n", "trainer = Trainer(devices=2, num_nodes=2, accelerator='gpu', strategy='dp')\n", "\n", + "# Mixed precision:\n", + "trainer = Trainer(amp_level='O1', precision=16)\n", + "\n", "# Of course, you can combine these flags as well.\n", "```" ] diff --git a/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb b/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb index 5e5b5c9fd4ba..f2d0a45327a2 100644 --- a/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb +++ b/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb @@ -628,18 +628,14 @@ "## For Faster Training\n", "We can dramatically improve the time taken to train this model by using Multi GPU training along with Mixed Precision.\n", "\n", - "For multi-GPU training, take a look at the 
[PyTorch Lightning Multi-GPU training section](https://pytorch-lightning.readthedocs.io/en/stable/advanced/multi_gpu.html)\n", - "\n", - "For mixed-precision training, take a look at the [PyTorch Lightning Mixed-Precision training section](https://pytorch-lightning.readthedocs.io/en/latest/guides/speed.html#mixed-precision-16-bit-training)\n", + "### Trainer with a distributed backend:\n", + "

trainer = Trainer(devices=2, num_nodes=2, accelerator='gpu', strategy='dp')\n",
+                "
\n", "\n", "### Mixed precision:\n", "
trainer = Trainer(amp_level='O1', precision=16)\n",
                 "
\n", "\n", - "### Trainer with a distributed backend:\n", - "
trainer = Trainer(devices=2, num_nodes=2, accelerator='gpu', strategy='dp')\n",
-                "
\n", - "\n", "Of course, you can combine these flags as well." ] }, From 0704e14d40f993a1db1afbafe0fb496ccc45a70c Mon Sep 17 00:00:00 2001 From: bene-ges <61418381+bene-ges@users.noreply.github.com> Date: Fri, 13 May 2022 02:43:55 +0300 Subject: [PATCH 110/244] Thutmose tagger bug fixes (#4162) * add pretrained ngc model, small fixes Signed-off-by: Alexandra Antonova * fix model location Signed-off-by: Alexandra Antonova * fix model location Signed-off-by: Alexandra Antonova * 1. fix typos. 2. write magic functions without space Signed-off-by: Alexandra Antonova * add example of inference with pretrained model Signed-off-by: Alexandra Antonova * changed model location to nemo Signed-off-by: Alexandra Antonova * style fix Signed-off-by: Alexandra Antonova * fix space Signed-off-by: Alexandra Antonova Co-authored-by: Alexandra Antonova --- .../conf/thutmose_tagger_itn_config.yaml | 4 - .../thutmose_tagger.py | 11 +- .../ITN_with_Thutmose_Tagger.ipynb | 206 +++++++++++++----- 3 files changed, 159 insertions(+), 62 deletions(-) diff --git a/examples/nlp/text_normalization_as_tagging/conf/thutmose_tagger_itn_config.yaml b/examples/nlp/text_normalization_as_tagging/conf/thutmose_tagger_itn_config.yaml index 7dc36b78d9e9..37ee85e7c53b 100644 --- a/examples/nlp/text_normalization_as_tagging/conf/thutmose_tagger_itn_config.yaml +++ b/examples/nlp/text_normalization_as_tagging/conf/thutmose_tagger_itn_config.yaml @@ -77,8 +77,6 @@ data: data_path: ??? # provide the full path to the file batch_size: 8 shuffle: true - max_insts: -1 # Maximum number of instances (-1 means no limit) - use_cache: false # uses a cache to store the processed dataset, you may use it for large datasets for speed up (especially when using multi GPUs) num_workers: 3 pin_memory: false drop_last: false @@ -87,8 +85,6 @@ data: data_path: ??? # provide the full path to the file. 
batch_size: 8 shuffle: false - max_insts: -1 # Maximum number of instances (-1 means no limit) - use_cache: false # uses a cache to store the processed dataset, you may use it for large datasets for speed up (especially when using multi GPUs) num_workers: 3 pin_memory: false drop_last: false diff --git a/nemo/collections/nlp/models/text_normalization_as_tagging/thutmose_tagger.py b/nemo/collections/nlp/models/text_normalization_as_tagging/thutmose_tagger.py index 1030dd9ae287..5fe13e07af4b 100644 --- a/nemo/collections/nlp/models/text_normalization_as_tagging/thutmose_tagger.py +++ b/nemo/collections/nlp/models/text_normalization_as_tagging/thutmose_tagger.py @@ -297,6 +297,7 @@ def _infer(self, sents: List[str]) -> List[List[int]]: - input words - tags predicted for input words - tags after swap preprocessing + - semiotic labels predicted for input words """ # all input sentences go into one batch @@ -406,4 +407,12 @@ def _setup_infer_dataloader(self, cfg: DictConfig, queries: List[str]) -> 'torch @classmethod def list_available_models(cls) -> Optional[PretrainedModelInfo]: - pass + result = [ + PretrainedModelInfo( + pretrained_model_name="itn_en_thutmose_bert", + location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/itn_en_thutmose_bert/versions/1.9.0/files/itn_en_thutmose_bert.nemo", + description="A single-pass tagger-based model for inverse text normalization based" + "on bert-base-uncased, trained on 2 mln sentences from Google Text Normalization Dataset", + ), + ] + return result diff --git a/tutorials/text_processing/ITN_with_Thutmose_Tagger.ipynb b/tutorials/text_processing/ITN_with_Thutmose_Tagger.ipynb index 50bf88a1e25c..3ee9d319515f 100644 --- a/tutorials/text_processing/ITN_with_Thutmose_Tagger.ipynb +++ b/tutorials/text_processing/ITN_with_Thutmose_Tagger.ipynb @@ -32,10 +32,10 @@ "\n", "# option #2: download NeMo repo\n", "if 'google.colab' in str(get_ipython()) or not os.path.exists(NEMO_DIR_PATH):\n", - " ! git clone -b $BRANCH https://github.com/{GITHUB_ACCOUNT}/NeMo\n", - " % cd NeMo\n", - " ! python -m pip install git+https://github.com/{GITHUB_ACCOUNT}/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", - " % cd .." + " !git clone -b $BRANCH https://github.com/{GITHUB_ACCOUNT}/NeMo\n", + " %cd NeMo\n", + " !python -m pip install git+https://github.com/{GITHUB_ACCOUNT}/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", + " %cd .." ] }, { @@ -49,8 +49,8 @@ "# If you're not using Colab, you might need to upgrade jupyter notebook to avoid the following error:\n", "# 'ImportError: IProgress not found. Please update jupyter and ipywidgets.'\n", "\n", - "! pip install ipywidgets\n", - "! jupyter nbextension enable --py widgetsnbextension\n", + "!pip install ipywidgets\n", + "!jupyter nbextension enable --py widgetsnbextension\n", "\n", "# Please restart the kernel after running this cell" ] @@ -113,8 +113,8 @@ }, "outputs": [], "source": [ - "! wget \"https://multilangaudiosamples.s3.us-east-2.amazonaws.com/en_data_small.zip\" \".\"\n", - "! unzip en_data_small" + "!wget \"https://multilangaudiosamples.s3.us-east-2.amazonaws.com/en_data_small.zip\"\n", + "!unzip en_data_small" ] }, { @@ -218,10 +218,10 @@ }, "outputs": [], "source": [ - "! git clone https://github.com/moses-smt/giza-pp.git giza-pp\n", + "!git clone https://github.com/moses-smt/giza-pp.git giza-pp\n", "%cd giza-pp\n", - "! ls\n", - "! make\n", + "!ls\n", + "!make\n", "%cd .." ] }, @@ -359,7 +359,7 @@ "Let's run GIZA++ alignment. 
\n", "In this tutorial we only work with three semiotic classes: date, letters and cardinal (in real setting all classes are used, excluding punct).\n", "\n", - "**Attention**: the environment variable USER should be defined with any value, otherwise GIZA++ ends with segmenation fault. " + "**Attention**: the environment variable USER should be defined with any value, otherwise GIZA++ ends with segmentation fault. " ], "metadata": { "id": "uUQMhEKGT7gv" @@ -389,15 +389,15 @@ "## It is necessary to specify environment variable USER=, otherwise GIZA++ terminates with a segfault \n", "\n", "%cd {ALIGNMENT_DIR}/date\n", - "! export USER=\"user\"; ./run.sh\n", + "!export USER=\"user\"; ./run.sh\n", "%cd ../..\n", "\n", "%cd {ALIGNMENT_DIR}/letters\n", - "! export USER=\"user\"; ./run.sh\n", + "!export USER=\"user\"; ./run.sh\n", "%cd ../..\n", "\n", "%cd {ALIGNMENT_DIR}/cardinal\n", - "! export USER=\"user\"; ./run.sh\n", + "!export USER=\"user\"; ./run.sh\n", "%cd ../.." ] }, @@ -418,19 +418,19 @@ }, "outputs": [], "source": [ - "! python {NEMO_PATH}/examples/nlp/text_normalization_as_tagging/dataset_preparation/extract_giza_alignments.py \\\n", + "!python {NEMO_PATH}/examples/nlp/text_normalization_as_tagging/dataset_preparation/extract_giza_alignments.py \\\n", " --mode=itn \\\n", " --giza_dir={ALIGNMENT_DIR}/date \\\n", " --giza_suffix=\"A3.final\" \\\n", " --out_filename=itn.out \\\n", " --lang={CORPUS_LANG}\n", - "! python {NEMO_PATH}/examples/nlp/text_normalization_as_tagging/dataset_preparation/extract_giza_alignments.py \\\n", + "!python {NEMO_PATH}/examples/nlp/text_normalization_as_tagging/dataset_preparation/extract_giza_alignments.py \\\n", " --mode=itn \\\n", " --giza_dir={ALIGNMENT_DIR}/letters \\\n", " --giza_suffix=\"A3.final\" \\\n", " --out_filename=itn.out \\\n", " --lang={CORPUS_LANG}\n", - "! python {NEMO_PATH}/examples/nlp/text_normalization_as_tagging/dataset_preparation/extract_giza_alignments.py \\\n", + "!python {NEMO_PATH}/examples/nlp/text_normalization_as_tagging/dataset_preparation/extract_giza_alignments.py \\\n", " --mode=itn \\\n", " --giza_dir={ALIGNMENT_DIR}/cardinal \\\n", " --giza_suffix=\"A3.final\" \\\n", @@ -441,7 +441,7 @@ { "cell_type": "markdown", "source": [ - "When we prepared the input corpus of ITN pairs for GIZA++, we uniqualized them and stored the frequencies in a separate file `freq`. Now let's append the frequencies to the resulting alignments." + "When we prepared the input corpus of ITN pairs for GIZA++, we made them unique and stored the frequencies in a separate file `freq`. Now let's append the frequencies to the resulting alignments." ], "metadata": { "id": "vpqiKrS6XBlP" @@ -519,7 +519,7 @@ }, "outputs": [], "source": [ - "! python {NEMO_PATH}/examples/nlp/text_normalization_as_tagging/dataset_preparation/prepare_corpora_after_alignment.py \\\n", + "!python {NEMO_PATH}/examples/nlp/text_normalization_as_tagging/dataset_preparation/prepare_corpora_after_alignment.py \\\n", " --mode=get_replacement_vocab \\\n", " --giza_dir={ALIGNMENT_DIR} \\\n", " --alignment_filename=itn.out2 \\\n", @@ -559,10 +559,10 @@ }, "outputs": [], "source": [ - "! head -n 150 replacement_vocab_full.txt.cardinal > replacement_vocab_cardinal.txt\n", - "! head -n 150 replacement_vocab_full.txt.date > replacement_vocab_date.txt\n", - "! head -n 150 replacement_vocab_full.txt.letters > replacement_vocab_letters.txt\n", - "! 
cat replacement_vocab_cardinal.txt \\\n", + "!head -n 150 replacement_vocab_full.txt.cardinal > replacement_vocab_cardinal.txt\n", + "!head -n 150 replacement_vocab_full.txt.date > replacement_vocab_date.txt\n", + "!head -n 150 replacement_vocab_full.txt.letters > replacement_vocab_letters.txt\n", + "!cat replacement_vocab_cardinal.txt \\\n", " replacement_vocab_date.txt \\\n", " replacement_vocab_letters.txt > replacement_vocab.select.txt\n" ] @@ -584,7 +584,7 @@ }, "outputs": [], "source": [ - "! wc -l replacement_vocab.select.txt" + "!wc -l replacement_vocab.select.txt" ] }, { @@ -595,7 +595,7 @@ }, "outputs": [], "source": [ - "! python {NEMO_PATH}/examples/nlp/text_normalization_as_tagging/dataset_preparation/prepare_corpora_after_alignment.py \\\n", + "!python {NEMO_PATH}/examples/nlp/text_normalization_as_tagging/dataset_preparation/prepare_corpora_after_alignment.py \\\n", " --mode=filter_by_vocab \\\n", " --giza_dir={ALIGNMENT_DIR} \\\n", " --alignment_filename=itn.out2 \\\n", @@ -626,8 +626,8 @@ }, "outputs": [], "source": [ - "! wc -l {ALIGNMENT_DIR}/cardinal/itn.out2\n", - "! wc -l {ALIGNMENT_DIR}/cardinal/itn.select.out\n" + "!wc -l {ALIGNMENT_DIR}/cardinal/itn.out2\n", + "!wc -l {ALIGNMENT_DIR}/cardinal/itn.select.out\n" ] }, { @@ -719,7 +719,7 @@ "The semiotic spans are used for two purposes:\n", " \n", "1. During validation step we calculate accuracy w.r.t. semiotic spans. For example, a DATE span is correct if **all** tag predictions inside this span match the ground truth labels.\n", - "2. The model has additional classiffication head that predicts a semiotic class label for each of the input words. These predictions are used in the post-processing step for better handling of swaps.\n", + "2. The model has additional classification head that predicts a semiotic class label for each of the input words. These predictions are used in the post-processing step for better handling of swaps.\n", "\n" ] }, @@ -731,7 +731,7 @@ }, "outputs": [], "source": [ - "! head {CORPUS_DIR}/dev.labeled" + "!head {CORPUS_DIR}/dev.labeled" ] }, { @@ -763,7 +763,7 @@ }, "outputs": [], "source": [ - "! python {NEMO_PATH}/examples/nlp/text_normalization_as_tagging/dataset_preparation/get_label_vocab.py \\\n", + "!python {NEMO_PATH}/examples/nlp/text_normalization_as_tagging/dataset_preparation/get_label_vocab.py \\\n", " --train_filename={CORPUS_DIR}/train.labeled \\\n", " --dev_filename={CORPUS_DIR}/dev.labeled \\\n", " --out_filename={CORPUS_DIR}/label_map.txt\n" @@ -777,7 +777,7 @@ }, "outputs": [], "source": [ - "! head {CORPUS_DIR}/label_map.txt" + "!head {CORPUS_DIR}/label_map.txt" ] }, { @@ -788,22 +788,22 @@ }, "outputs": [], "source": [ - "! echo \"ADDRESS\" > {CORPUS_DIR}/semiotic_classes.txt\n", - "! echo \"CARDINAL\" >> {CORPUS_DIR}/semiotic_classes.txt\n", - "! echo \"DATE\" >> {CORPUS_DIR}/semiotic_classes.txt\n", - "! echo \"DECIMAL\" >> {CORPUS_DIR}/semiotic_classes.txt\n", - "! echo \"DIGIT\" >> {CORPUS_DIR}/semiotic_classes.txt\n", - "! echo \"ELECTRONIC\" >> {CORPUS_DIR}/semiotic_classes.txt\n", - "! echo \"FRACTION\" >> {CORPUS_DIR}/semiotic_classes.txt\n", - "! echo \"LETTERS\" >> {CORPUS_DIR}/semiotic_classes.txt\n", - "! echo \"MEASURE\" >> {CORPUS_DIR}/semiotic_classes.txt\n", - "! echo \"MONEY\" >> {CORPUS_DIR}/semiotic_classes.txt\n", - "! echo \"ORDINAL\" >> {CORPUS_DIR}/semiotic_classes.txt\n", - "! echo \"PLAIN\" >> {CORPUS_DIR}/semiotic_classes.txt\n", - "! echo \"PUNCT\" >> {CORPUS_DIR}/semiotic_classes.txt\n", - "! 
echo \"TELEPHONE\" >> {CORPUS_DIR}/semiotic_classes.txt\n", - "! echo \"TIME\" >> {CORPUS_DIR}/semiotic_classes.txt\n", - "! echo \"VERBATIM\" >> {CORPUS_DIR}/semiotic_classes.txt\n", + "!echo \"ADDRESS\" > {CORPUS_DIR}/semiotic_classes.txt\n", + "!echo \"CARDINAL\" >> {CORPUS_DIR}/semiotic_classes.txt\n", + "!echo \"DATE\" >> {CORPUS_DIR}/semiotic_classes.txt\n", + "!echo \"DECIMAL\" >> {CORPUS_DIR}/semiotic_classes.txt\n", + "!echo \"DIGIT\" >> {CORPUS_DIR}/semiotic_classes.txt\n", + "!echo \"ELECTRONIC\" >> {CORPUS_DIR}/semiotic_classes.txt\n", + "!echo \"FRACTION\" >> {CORPUS_DIR}/semiotic_classes.txt\n", + "!echo \"LETTERS\" >> {CORPUS_DIR}/semiotic_classes.txt\n", + "!echo \"MEASURE\" >> {CORPUS_DIR}/semiotic_classes.txt\n", + "!echo \"MONEY\" >> {CORPUS_DIR}/semiotic_classes.txt\n", + "!echo \"ORDINAL\" >> {CORPUS_DIR}/semiotic_classes.txt\n", + "!echo \"PLAIN\" >> {CORPUS_DIR}/semiotic_classes.txt\n", + "!echo \"PUNCT\" >> {CORPUS_DIR}/semiotic_classes.txt\n", + "!echo \"TELEPHONE\" >> {CORPUS_DIR}/semiotic_classes.txt\n", + "!echo \"TIME\" >> {CORPUS_DIR}/semiotic_classes.txt\n", + "!echo \"VERBATIM\" >> {CORPUS_DIR}/semiotic_classes.txt\n", "\n" ] }, @@ -815,10 +815,10 @@ }, "outputs": [], "source": [ - "! mkdir {WORK_DIR}/datasets\n", + "!mkdir {WORK_DIR}/datasets\n", "\n", - "! cp {CORPUS_DIR}/label_map.txt {WORK_DIR}/datasets/label_map.txt\n", - "! cp {CORPUS_DIR}/semiotic_classes.txt {WORK_DIR}/datasets/semiotic_classes.txt\n" + "!cp {CORPUS_DIR}/label_map.txt {WORK_DIR}/datasets/label_map.txt\n", + "!cp {CORPUS_DIR}/semiotic_classes.txt {WORK_DIR}/datasets/semiotic_classes.txt\n" ] }, { @@ -837,7 +837,7 @@ "cell_type": "code", "source": [ "DATASET = WORK_DIR + \"/datasets/itn_sample10k\"\n", - "! mkdir {DATASET}\n", + "!mkdir {DATASET}\n", "!head -n 5000 {CORPUS_DIR}/train.labeled > {DATASET}/train.tsv\n", "!head -n 5000 {CORPUS_DIR}/dev.labeled > {DATASET}/valid.tsv\n", "!cp {DATASET}/valid.tsv {DATASET}/test.tsv\n" @@ -861,7 +861,7 @@ { "cell_type": "code", "source": [ - "! python {NEMO_PATH}/examples/nlp/text_normalization_as_tagging/normalization_as_tagging_train.py \\\n", + "!python {NEMO_PATH}/examples/nlp/text_normalization_as_tagging/normalization_as_tagging_train.py \\\n", " lang=en \\\n", " data.validation_ds.data_path={DATASET}/valid.tsv \\\n", " data.train_ds.data_path={DATASET}/train.tsv \\\n", @@ -894,7 +894,7 @@ "cell_type": "code", "source": [ "# the log can be found in nemo_experiments folder\n", - "! cat nemo_experiments/training/*/nemo_log_globalrank-0_localrank-0.txt" + "!cat nemo_experiments/training/*/nemo_log_globalrank-0_localrank-0.txt" ], "metadata": { "id": "gO1nez6AWJeW" @@ -944,8 +944,8 @@ { "cell_type": "code", "source": [ - "! echo \"on the ninth of may four days after her arrival at new orleans west carnifax was decommissioned and returned to the u s s b\" > test_sent.txt\n", - "! echo \"retrieved the fourth of october twenty fifteen\" >> test_sent.txt" + "!echo \"on the ninth of may four days after her arrival at new orleans west carnifax was decommissioned and returned to the u s s b\" > test_sent.txt\n", + "!echo \"retrieved the fourth of october twenty fifteen\" >> test_sent.txt" ], "metadata": { "id": "30KlsQ6uY6vu" @@ -979,7 +979,7 @@ { "cell_type": "code", "source": [ - "! 
cat test_sent.output" + "!cat test_sent.output" ], "metadata": { "id": "jrGJb9DcZ83E" @@ -1025,6 +1025,98 @@ "metadata": { "id": "AY9sQCIcUEGO" } + }, + { + "cell_type": "markdown", + "source": [ + "# Inference with a pretrained model\n", + "\n", + "We can also run inference with a pretrained model [itn_en_thutmose_bert](https://catalog.ngc.nvidia.com/orgs/nvidia/models/itn_en_thutmose_bert).\n", + "This is how to use it directly from python." + ], + "metadata": { + "id": "cMYFQLbaY3m-" + } + }, + { + "cell_type": "code", + "source": [ + "thutmose = nemo_nlp.models.ThutmoseTaggerModel.from_pretrained('itn_en_thutmose_bert')\n", + "thutmose.summarize()" + ], + "metadata": { + "id": "8Uor5qqcYgGF" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "from nemo.collections.nlp.data.text_normalization_as_tagging.utils import spoken_preprocessing\n", + "\n", + "lines = [\"on the ninth of may four days after her arrival at new orleans west carnifax was decommissioned and returned to the u s s b\",\n", + " \"retrieved the fourth of october twenty fifteen\"]\n", + "\n", + "\n", + "batch, all_preds = [], []\n", + "for i, line in enumerate(lines):\n", + " s = spoken_preprocessing(line) # this is the same input transformation as in corpus preparation\n", + " batch.append(s.strip())\n", + " outputs = thutmose._infer(batch)\n", + "for x in outputs:\n", + " all_preds.append(x)\n", + "\n", + "if len(all_preds) != len(lines):\n", + " raise ValueError(\n", + " \"number of input lines and predictions is different: predictions=\"\n", + " + str(len(all_preds))\n", + " + \"; lines=\"\n", + " + str(len(lines))\n", + " )\n", + "\n", + "for i in range(len(all_preds)):\n", + " print (all_preds[i])" + ], + "metadata": { + "id": "FcOiYPJwZzS0" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "Or we can use the inference script" + ], + "metadata": { + "id": "sJguLcWhaFdE" + } + }, + { + "cell_type": "code", + "source": [ + "!python {NEMO_PATH}/examples/nlp/text_normalization_as_tagging/normalization_as_tagging_infer.py \\\n", + " pretrained_model=itn_en_thutmose_bert \\\n", + " inference.from_file=./test_sent.txt \\\n", + " inference.out_file=./test_sent.output" + ], + "metadata": { + "id": "4R1hRpU-aMUs" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "!cat test_sent.output" + ], + "metadata": { + "id": "E0qVTUyvaQt1" + }, + "execution_count": null, + "outputs": [] } ], "metadata": { From 4bbe6fb86a06466beee697f8d3565dedb9ec2882 Mon Sep 17 00:00:00 2001 From: Nithin Rao Date: Fri, 13 May 2022 16:26:56 -0700 Subject: [PATCH 111/244] update speaker docs (#4164) * update speaker docs Signed-off-by: nithinraok * chunks -> segments Signed-off-by: nithinraok * Khz -> kHz Signed-off-by: nithinraok --- .../asr/speaker_diarization/datasets.rst | 10 ++--- .../asr/speaker_recognition/datasets.rst | 34 ++++++++--------- examples/speaker_tasks/recognition/README.md | 14 +++---- scripts/dataset_processing/get_hi-mia_data.py | 4 +- scripts/speaker_tasks/filelist_to_manifest.py | 38 +++++++++---------- ...st.py => pathfiles_to_diarize_manifest.py} | 0 .../ASR_with_SpeakerDiarization.ipynb | 4 +- .../Speaker_Diarization_Inference.ipynb | 4 +- .../Speaker_Identification_Verification.ipynb | 22 +++++------ 9 files changed, 65 insertions(+), 65 deletions(-) rename scripts/speaker_tasks/{pathsfiles_to_manifest.py => pathfiles_to_diarize_manifest.py} (100%) diff --git 
a/docs/source/asr/speaker_diarization/datasets.rst b/docs/source/asr/speaker_diarization/datasets.rst
index d2c8d2a93944..ab38243fbb81 100644
--- a/docs/source/asr/speaker_diarization/datasets.rst
+++ b/docs/source/asr/speaker_diarization/datasets.rst
@@ -14,11 +14,11 @@ Diarization inference is based on Hydra configurations which are fulfilled by ``
 
     {"audio_filepath": "/path/to/abcd.wav", "offset": 0, "duration": null, "label": "infer", "text": "-", "num_speakers": null, "rttm_filepath": "/path/to/rttm/abcd.rttm", "uem_filepath": "/path/to/uem/abcd.uem"}
 
-In each line of the input manifest file, ``audio_filepath`` item is mandatory while the rest of the items are optional and can be passed for desired diarization setting. We refer to this file as a manifest file. This manifest file can be created by using the script in ``/scripts/speaker_tasks/pathsfiles_to_manifest.py``. The following example shows how to run ``pathsfiles_to_manifest.py`` by providing path list files.
+In each line of the input manifest file, ``audio_filepath`` item is mandatory while the rest of the items are optional and can be passed for desired diarization setting. We refer to this file as a manifest file. This manifest file can be created by using the script in ``/scripts/speaker_tasks/pathfiles_to_diarize_manifest.py``. The following example shows how to run ``pathfiles_to_diarize_manifest.py`` by providing path list files.
 
 .. code-block:: bash
 
-    python pathsfiles_to_manifest.py --paths2audio_files /path/to/audio_file_path_list.txt \
+    python pathfiles_to_diarize_manifest.py --paths2audio_files /path/to/audio_file_path_list.txt \
         --paths2txt_files /path/to/transcript_file_path_list.txt \
         --paths2rttm_files /path/to/rttm_file_path_list.txt \
         --paths2uem_files /path/to/uem_file_path_list.txt \
@@ -40,7 +40,7 @@ The ``--paths2audio_files`` and ``--manifest_filepath`` are required arguments.
 
     /path/to/abcd02.rttm
 
-The path list files containing the absolute paths to these WAV, RTTM, TXT, CTM and UEM files should be provided as in the above example. ``pathsfiles_to_manifest.py`` script will match each file using the unique filename (e.g. ``abcd``). Finally, the absolute path of the created manifest file should be provided through Hydra configuration as shown below:
+The path list files containing the absolute paths to these WAV, RTTM, TXT, CTM and UEM files should be provided as in the above example. ``pathfiles_to_diarize_manifest.py`` script will match each file using the unique filename (e.g. ``abcd``). Finally, the absolute path of the created manifest file should be provided through Hydra configuration as shown below:
 
 .. code-block:: yaml
 
@@ -127,7 +127,7 @@ To evaluate the performance on AMI Meeting Corpus, the following instructions ca
 
   - Download AMI Meeting Corpus from `AMI website `_. Choose ``Headset mix`` which has a mono wav file for each session.
   - Download the test set (whitelist) from `Pyannotate AMI test set whitelist `_.
   - The merged RTTM file for AMI test set can be downloaded from `Pyannotate AMI test set RTTM file `_. Note that this file should be split into individual rttm files. Download split rttm files for AMI test set from `AMI test set split RTTM files `_. 
- - Generate an input manifest file using ``/scripts/speaker_tasks/pathsfiles_to_manifest.py`` + - Generate an input manifest file using ``/scripts/speaker_tasks/pathfiles_to_diarize_manifest.py`` CallHome American English Speech (CHAES), LDC97S42 @@ -154,5 +154,5 @@ To evaluate the performance on AMI Meeting Corpus, the following instructions ca - Download CHAES Meeting Corpus at LDC website `LDC97S42 `_ (CHAES is not publicly available). - Download the CH109 filename list (whitelist) from `CH109 whitelist `_. - Download RTTM files for CH109 set from `CH109 RTTM files `_. - - Generate an input manifest file using ``/scripts/speaker_tasks/pathsfiles_to_manifest.py`` + - Generate an input manifest file using ``/scripts/speaker_tasks/pathfiles_to_diarize_manifest.py`` diff --git a/docs/source/asr/speaker_recognition/datasets.rst b/docs/source/asr/speaker_recognition/datasets.rst index 50a0eaec8a9a..88c600b3c523 100644 --- a/docs/source/asr/speaker_recognition/datasets.rst +++ b/docs/source/asr/speaker_recognition/datasets.rst @@ -24,35 +24,35 @@ After download and conversion, your `data` folder should contain directories wit All-other Datasets ------------------ -These methods can be applied to any dataset to get similar training manifest files. +These methods can be applied to any dataset to get similar training or inference manifest files. -First we prepare scp file(s) containing absolute paths to all the wav files required for each of the train, dev, and test set. This can be easily prepared by using ``find`` bash command as follows: +`filelist_to_manifest.py` script in `$/scripts/speaker_tasks/` folder generates manifest file from a text file containing paths to audio files. -.. code-block:: bash - - !find {data_dir}/{train_dir} -iname "*.wav" > data/train_all.scp - !head -n 3 data/train_all.scp +sample `filelist.txt` file contents: +.. code-block:: bash -Based on the created scp file, we use `scp_to_manifest.py` script to convert it to a manifest file. This script takes three optional arguments: + /data/datasets/voxceleb/data/dev/aac_wav/id00179/Q3G6nMr1ji0/00086.wav + /data/datasets/voxceleb/data/dev/aac_wav/id00806/VjpQLxHQQe4/00302.wav + /data/datasets/voxceleb/data/dev/aac_wav/id01510/k2tzXQXvNPU/00132.wav -* id: This value is used to assign speaker label to each audio file. This is the field number separated by `/` from the audio file path. For example if all audio file paths follow the convention of path/to/speaker_folder/unique_speaker_label/file_name.wav, by picking `id=3 or id=-2` script picks unique_speaker_label as label for that utterance. -* split: Optional argument to split the manifest in to train and dev json files -* create_chunks: Optional argument to randomly spit each audio file in to chunks of 1.5 sec, 2 sec and 3 sec for robust training of speaker embedding extractor model. +This list file is used to generate manifest file. This script has optional arguments to split the whole manifest file in to train and dev and also segment audio files to smaller segments for robust training (for testing, we don't need to create segments for each utterance). +sample usage: -After the download and conversion, your data folder should contain directories with manifest files as: - -* `data//train.json` -* `data//dev.json` -* `data//train_all.json` +.. 
code-block:: bash -Each line in the manifest file describes a training sample - audio_filepath contains the path to the wav file, duration it's duration in seconds, and label is the speaker class label: + python filelist_to_manifest.py --filelist=filelist.txt --id=-3 --out=speaker_manifest.json +This would create a manifest containing file contents as shown below: .. code-block:: json - {"audio_filepath": "/audio_file.wav", "duration": 3.9, "label": "speaker_id"} + {"audio_filepath": "/data/datasets/voxceleb/data/dev/aac_wav/id00179/Q3G6nMr1ji0/00086.wav", "offset": 0, "duration": 4.16, "label": "id00179"} + {"audio_filepath": "/data/datasets/voxceleb/data/dev/aac_wav/id00806/VjpQLxHQQe4/00302.wav", "offset": 0, "duration": 12.288, "label": "id00806"} + {"audio_filepath": "/data/datasets/voxceleb/data/dev/aac_wav/id01510/k2tzXQXvNPU/00132.wav", "offset": 0, "duration": 4.608, "label": "id01510"} +For other optional arguments like splitting manifest file to train and dev and for creating segements from each utterance refer to the arguments +described in the script. Tarred Datasets --------------- diff --git a/examples/speaker_tasks/recognition/README.md b/examples/speaker_tasks/recognition/README.md index b8dbdbf26388..459fc77d4b55 100644 --- a/examples/speaker_tasks/recognition/README.md +++ b/examples/speaker_tasks/recognition/README.md @@ -48,8 +48,8 @@ We first generate manifest file to get embeddings. The embeddings are then used ```bash # create list of files from voxceleb1 test folder (40 speaker test set) -find -iname '*.wav' > voxceleb1_test_files.scp -python /scripts/speaker_tasks/scp_to_manifest.py --scp voxceleb1_test_files.scp --id -3 --out voxceleb1_test_manifest.json +find -iname '*.wav' > voxceleb1_test_files.txt +python /scripts/speaker_tasks/filelist_to_manifest.py --filelist voxceleb1_test_files.txt --id -3 --out voxceleb1_test_manifest.json ``` ### Embedding Extraction Now using the manifest file created, we can extract embeddings to `data` folder using: @@ -92,14 +92,14 @@ ffmpeg -v 8 -i -f wav -acodec pcm_s16le Generate a list file that contains paths to all the dev audio files from voxceleb1 and voxceleb2 using find command as shown below: ```bash -find -iname '*.wav' > voxceleb1_dev.scp -find -iname '*.wav' > voxceleb2_dev.scp -cat voxceleb1_dev.scp voxceleb2_dev.scp > voxceleb12.scp +find -iname '*.wav' > voxceleb1_dev.txt +find -iname '*.wav' > voxceleb2_dev.txt +cat voxceleb1_dev.txt voxceleb2_dev.txt > voxceleb12.txt ``` -This list file is now used to generate training and validation manifest files using a script provided in `/scripts/speaker_tasks/`. This script has optional arguments to split the whole manifest file in to train and dev and also chunk audio files to smaller chunks for robust training (for testing, we don't need this). +This list file is now used to generate training and validation manifest files using a script provided in `/scripts/speaker_tasks/`. This script has optional arguments to split the whole manifest file in to train and dev and also chunk audio files to smaller segments for robust training (for testing, we don't need this). ```bash -python /scripts/speaker_tasks/scp_to_manifest.py --scp voxceleb12.scp --id -3 --out voxceleb12_manifest.json --split --create_chunks +python /scripts/speaker_tasks/filelist_to_manifest.py --filelist voxceleb12.txt --id -3 --out voxceleb12_manifest.json --split --create_segments ``` This creates `train.json, dev.json` in the current working directory. 
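
The README and docs hunks above describe the filelist-to-manifest conversion only at the command-line level. As an editorial aid, here is a minimal Python sketch of the per-file transformation those hunks describe. The helper name `make_manifest_entry` and the use of `soundfile` to read the duration are illustrative assumptions, not the script's exact implementation (the real `filelist_to_manifest.py` computes durations in a multiprocessing map and can additionally split and segment the data).

```python
# Hypothetical condensed sketch of the filelist -> manifest conversion
# described above; helper name and soundfile-based duration are assumptions.
import json
import soundfile as sf

def make_manifest_entry(path: str, id_index: int = -3) -> dict:
    path = path.strip()
    # The speaker label is the path field selected by --id, counted across '/'.
    label = path.split('/')[id_index]
    info = sf.info(path)  # header-only read; no full audio decode needed
    return {
        "audio_filepath": path,
        "offset": 0,
        "duration": round(info.frames / info.samplerate, 3),
        "label": label,
    }

with open("voxceleb12.txt") as fin, open("voxceleb12_manifest.json", "w") as fout:
    for line in fin:
        fout.write(json.dumps(make_manifest_entry(line)) + "\n")
```

Each output line is a single JSON object with the `audio_filepath`, `offset`, `duration`, and `label` keys shown in the documentation diff above.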
diff --git a/scripts/dataset_processing/get_hi-mia_data.py b/scripts/dataset_processing/get_hi-mia_data.py index 19572ac55472..4fbc3bcc26f9 100644 --- a/scripts/dataset_processing/get_hi-mia_data.py +++ b/scripts/dataset_processing/get_hi-mia_data.py @@ -135,7 +135,7 @@ def __process_data(data_folder: str, data_set: str): """ fullpath = os.path.abspath(data_folder) - scp = glob(fullpath + "/**/*.wav", recursive=True) + filelist = glob(fullpath + "/**/*.wav", recursive=True) out = os.path.join(fullpath, data_set + "_all.json") utt2spk = os.path.join(fullpath, "utt2spk") utt2spk_file = open(utt2spk, "w") @@ -152,7 +152,7 @@ def __process_data(data_folder: str, data_set: str): speakers = [] lines = [] with open(out, "w") as outfile: - for line in tqdm(scp): + for line in tqdm(filelist): line = line.strip() y, sr = l.load(line, sr=None) if sr != 16000: diff --git a/scripts/speaker_tasks/filelist_to_manifest.py b/scripts/speaker_tasks/filelist_to_manifest.py index 18ad6579a551..3a6c27d39377 100644 --- a/scripts/speaker_tasks/filelist_to_manifest.py +++ b/scripts/speaker_tasks/filelist_to_manifest.py @@ -30,21 +30,21 @@ This scipt converts a filelist file where each line contains to a manifest json file. Optionally post processes the manifest file to create dev and train split for speaker embedding -training, also optionally chunk an audio file in to segments of random DURATIONS and create those +training, also optionally segment an audio file in to segments of random DURATIONS and create those wav files in CWD. -While creating chunks, if audio is not sampled at 16Khz, it resamples to 16Khz and write the wav file. +While creating segments, if audio is not sampled at 16kHz, it resamples to 16kHz and write the wav file. Args: --filelist: path to file containing list of audio files ---manifest(optional): if you already have manifest file, but would like to process it for creating chunks and splitting then use manifest ignoring filelist +--manifest(optional): if you already have manifest file, but would like to process it for creating + segments and splitting then use manifest ignoring filelist --id: index of speaker label in filename present in filelist file that is separated by '/' --out: output manifest file name --split: if you would want to split the manifest file for training purposes - you may not need this for test set. output file names is _.json - Defaults to False ---create_chunks:if you would want to chunk each manifest line to chunks of 4 sec or less - you may not need this for test set, Defaults to False ---min_spkrs_count: min number of samples per speaker to consider and ignore otherwise + you may not need this for test set. 
output file names is _.json, defaults to False +--create_segments: if you would want to segment each manifest line to segments of [1,2,3,4] sec or less + you may not need this for test set, defaults to False +--min_spkrs_count: min number of samples per speaker to consider and ignore otherwise, defaults to 0 (all speakers) """ DURATIONS = sorted([1, 2, 3, 4], reverse=True) @@ -60,7 +60,7 @@ def filter_manifest_line(manifest_line): dur = manifest_line['duration'] label = manifest_line['label'] endname = os.path.splitext(audio_path.split(label, 1)[-1])[0] - to_path = os.path.join(CWD, 'chunks', label) + to_path = os.path.join(CWD, 'segments', label) to_path = os.path.join(to_path, endname[1:]) os.makedirs(os.path.dirname(to_path), exist_ok=True) @@ -87,8 +87,8 @@ def filter_manifest_line(manifest_line): c_start = int(float(start * sr)) c_end = c_start + int(float(temp_dur * sr)) - chunk = signal[c_start:c_end] - sf.write(to_file, chunk, sr) + segment = signal[c_start:c_end] + sf.write(to_file, segment, sr) meta = manifest_line.copy() meta['audio_filepath'] = to_file @@ -172,7 +172,7 @@ def get_labels(lines): return labels -def main(filelist, manifest, id, out, split=False, create_chunks=False, min_count=10): +def main(filelist, manifest, id, out, split=False, create_segments=False, min_count=10): if os.path.exists(out): os.remove(out) if filelist: @@ -185,8 +185,8 @@ def main(filelist, manifest, id, out, split=False, create_chunks=False, min_coun lines = process_map(get_duration, lines, chunksize=100) - if create_chunks: - print(f"creating and writing chunks to {CWD}") + if create_segments: + print(f"creating and writing segments to {CWD}") lines = process_map(filter_manifest_line, lines, chunksize=100) temp = [] for line in lines: @@ -197,7 +197,7 @@ def main(filelist, manifest, id, out, split=False, create_chunks=False, min_coun speakers = [x['label'] for x in lines] if min_count: - speakers, lines = count_and_consider_only(speakers, lines, min_count) + speakers, lines = count_and_consider_only(speakers, lines, abs(min_count)) write_file(out, lines, range(len(lines))) path = os.path.dirname(out) @@ -232,14 +232,14 @@ def main(filelist, manifest, id, out, split=False, create_chunks=False, min_coun action='store_true', ) parser.add_argument( - "--create_chunks", - help="bool if you would want to chunk each manifest line to chunks of 4 sec or less", + "--create_segments", + help="bool if you would want to segment each manifest line to segments of 4 sec or less", required=False, action='store_true', ) parser.add_argument( "--min_spkrs_count", - default=10, + default=0, type=int, help="min number of samples per speaker to consider and ignore otherwise", ) @@ -247,5 +247,5 @@ def main(filelist, manifest, id, out, split=False, create_chunks=False, min_coun args = parser.parse_args() main( - args.filelist, args.manifest, args.id, args.out, args.split, args.create_chunks, args.min_spkrs_count, + args.filelist, args.manifest, args.id, args.out, args.split, args.create_segments, args.min_spkrs_count, ) diff --git a/scripts/speaker_tasks/pathsfiles_to_manifest.py b/scripts/speaker_tasks/pathfiles_to_diarize_manifest.py similarity index 100% rename from scripts/speaker_tasks/pathsfiles_to_manifest.py rename to scripts/speaker_tasks/pathfiles_to_diarize_manifest.py diff --git a/tutorials/speaker_tasks/ASR_with_SpeakerDiarization.ipynb b/tutorials/speaker_tasks/ASR_with_SpeakerDiarization.ipynb index 76dbf7bd12e1..45a3787641b3 100644 --- a/tutorials/speaker_tasks/ASR_with_SpeakerDiarization.ipynb +++ 
b/tutorials/speaker_tasks/ASR_with_SpeakerDiarization.ipynb
@@ -235,7 +235,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Lets create a manifest file with the an4 audio and rttm available. If you have more than one file you may also use the script `NeMo/scripts/speaker_tasks/pathsfiles_to_manifest.py` to generate a manifest file from a list of audio files. In addition, you can optionally include rttm files to evaluate the diarization results."
+    "Let's create a manifest file with the an4 audio and rttm available. If you have more than one file, you may also use the script `NeMo/scripts/speaker_tasks/pathfiles_to_diarize_manifest.py` to generate a manifest file from a list of audio files. In addition, you can optionally include rttm files to evaluate the diarization results."
   ]
  },
  {
@@ -663,4 +663,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 4
-}
\ No newline at end of file
+}
diff --git a/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb b/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb
index d3671f5ff776..02fd31c02b71 100644
--- a/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb
+++ b/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb
@@ -169,7 +169,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "Lets create manifest with the an4 audio and rttm available. If you have more than one files you may also use the script `pathsfiles_to_manifest.py` to generate manifest file from list of audio files and optionally rttm files "
+    "Let's create a manifest with the an4 audio and rttm available. If you have more than one file, you may also use the script `pathfiles_to_diarize_manifest.py` to generate a manifest file from a list of audio files and, optionally, rttm files "
   ]
  },
  {
@@ -593,4 +593,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 4
-}
\ No newline at end of file
+}
diff --git a/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb b/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb
index f2d0a45327a2..2f81df174b17 100644
--- a/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb
+++ b/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb
@@ -114,7 +114,7 @@
   "source": [
    "Since an4 is not designed for speaker recognition, this gives us the opportunity to demonstrate how you can generate the manifest files needed for training. These methods can be applied to any dataset to get similar training manifest files. \n",
-    "First get an scp file(s) which has all the wav files with absolute paths for each of the train, dev, and test set. This can be easily done by the `find` bash command"
+    "First, create a list file which has all the wav files with absolute paths for each of the train, dev, and test sets. This can be done easily with the `find` bash command"
   ]
  },
  {
@@ -127,7 +127,7 @@
   },
   "outputs": [],
   "source": [
-    "!find {data_dir}/an4/wav/an4_clstk -iname \"*.wav\" > data/an4/wav/an4_clstk/train_all.scp"
+    "!find {data_dir}/an4/wav/an4_clstk -iname \"*.wav\" > data/an4/wav/an4_clstk/train_all.txt"
   ]
  },
  {
@@ -137,7 +137,7 @@
   "id": "BhWVg2QoDhL3"
  },
  "source": [
-    "Let's look at the first 3 lines of scp file for train."
+    "Let's look at the first 3 lines of the filelist text file for train."
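(Editorial aside: for readers who prefer to stay in Python, the recursive glob idiom that get_hi-mia_data.py switched to above can stand in for the `find` command. A minimal sketch, assuming the an4 layout from this tutorial; the data_dir value is a placeholder, not taken from the notebook:

    # Hypothetical pure-Python equivalent of the `!find ... -iname "*.wav"` cell,
    # using the same glob(..., recursive=True) pattern as get_hi-mia_data.py.
    import os
    from glob import glob

    data_dir = "data"  # placeholder root directory
    wav_root = os.path.abspath(os.path.join(data_dir, "an4", "wav", "an4_clstk"))

    # Collect absolute paths of all wav files under wav_root, recursively.
    filelist = sorted(glob(os.path.join(wav_root, "**", "*.wav"), recursive=True))

    with open(os.path.join(wav_root, "train_all.txt"), "w") as f:
        f.write("\n".join(filelist) + "\n")

Note that glob is case-sensitive, unlike `find -iname`, so datasets with `.WAV` extensions would need an extra pattern.)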
]
  },
  {
@@ -150,7 +150,7 @@
   },
   "outputs": [],
   "source": [
-    "!head -n 3 {data_dir}/an4/wav/an4_clstk/train_all.scp"
+    "!head -n 3 {data_dir}/an4/wav/an4_clstk/train_all.txt"
   ]
  },
  {
@@ -160,7 +160,7 @@
   "id": "Y9L9Tl0XDw5Z"
  },
  "source": [
-    "Since we created the scp file for the train, we use `scp_to_manifest.py` to convert this scp file to a manifest file and then optionally split the files to train \& dev for evaluating the models while training by using the `--split` flag. We wouldn't be needing the `--split` option for the test folder. \n",
+    "Since we created the list text file for the train set, we use `filelist_to_manifest.py` to convert this text file to a manifest file, and then optionally split the files into train \& dev sets for evaluating the models during training by using the `--split` flag. We won't need the `--split` option for the test folder. \n",
     "Accordingly, please specify the `id` number, i.e. which `/`-separated field of the file path should be treated as the speaker label "
   ]
  },
@@ -195,8 +195,8 @@
    "if not os.path.exists('scripts'):\n",
    "  print(\"Downloading necessary scripts\")\n",
    "  !mkdir -p scripts/speaker_tasks\n",
-    "  !wget -P scripts/speaker_tasks/ https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/scripts/speaker_tasks/scp_to_manifest.py\n",
-    "!python {NEMO_ROOT}/scripts/speaker_tasks/scp_to_manifest.py --scp {data_dir}/an4/wav/an4_clstk/train_all.scp --id -2 --out {data_dir}/an4/wav/an4_clstk/all_manifest.json --split"
+    "  !wget -P scripts/speaker_tasks/ https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/scripts/speaker_tasks/filelist_to_manifest.py\n",
+    "!python {NEMO_ROOT}/scripts/speaker_tasks/filelist_to_manifest.py --filelist {data_dir}/an4/wav/an4_clstk/train_all.txt --id -2 --out {data_dir}/an4/wav/an4_clstk/all_manifest.json --split"
   ]
  },
  {
@@ -206,7 +206,7 @@
   "id": "5kPCmx5DHvY5"
  },
  "source": [
-    "Generate the scp for the test folder and then convert it to a manifest."
+    "Generate the list text file for the test folder and then convert it to a manifest."
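(Editorial aside: the manifest produced by this conversion is a plain text file with one JSON object per line. The field names below are the ones `filter_manifest_line()` reads in filelist_to_manifest.py above; the values are illustrative, not real an4 entries:

    # Sketch of the speaker-task manifest format: one JSON object per line.
    import json

    entries = [
        {"audio_filepath": "/abs/path/an4/wav/sample_01.wav", "duration": 3.2, "label": "spk_42"},
        {"audio_filepath": "/abs/path/an4/wav/sample_02.wav", "duration": 2.7, "label": "spk_17"},
    ]

    with open("all_manifest.json", "w") as f:
        for entry in entries:
            f.write(json.dumps(entry) + "\n")

    # Reading it back line by line, the way manifest consumers do:
    with open("all_manifest.json") as f:
        for line in f:
            entry = json.loads(line)
            print(entry["audio_filepath"], entry["duration"], entry["label"])

The `--id -2` argument in the cell above selects the second-to-last `/`-separated field of each path as the speaker label.)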
] }, { @@ -219,8 +219,8 @@ }, "outputs": [], "source": [ - "!find {data_dir}/an4/wav/an4test_clstk -iname \"*.wav\" > {data_dir}/an4/wav/an4test_clstk/test_all.scp\n", - "!python {NEMO_ROOT}/scripts/speaker_tasks/scp_to_manifest.py --scp {data_dir}/an4/wav/an4test_clstk/test_all.scp --id -2 --out {data_dir}/an4/wav/an4test_clstk/test.json" + "!find {data_dir}/an4/wav/an4test_clstk -iname \"*.wav\" > {data_dir}/an4/wav/an4test_clstk/test_all.txt\n", + "!python {NEMO_ROOT}/scripts/speaker_tasks/filelist_to_manifest.py --filelist {data_dir}/an4/wav/an4test_clstk/test_all.txt --id -2 --out {data_dir}/an4/wav/an4test_clstk/test.json" ] }, { @@ -1264,4 +1264,4 @@ }, "nbformat": 4, "nbformat_minor": 1 -} \ No newline at end of file +} From 52e5b25342954adfcc335346ce57c75b73278643 Mon Sep 17 00:00:00 2001 From: treacker Date: Sun, 15 May 2022 08:40:31 -0700 Subject: [PATCH 112/244] changed to vits g2p --- nemo/collections/tts/torch/tts_tokenizers.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/nemo/collections/tts/torch/tts_tokenizers.py b/nemo/collections/tts/torch/tts_tokenizers.py index 677de052e7f4..5f28ffe18841 100644 --- a/nemo/collections/tts/torch/tts_tokenizers.py +++ b/nemo/collections/tts/torch/tts_tokenizers.py @@ -361,12 +361,16 @@ def encode(self, text): class IPAPhonemesTokenizer(BaseTokenizer): # fmt: off - _punctuation = ';:,.!?¡¿—…"«»“”#()-~[]|/' - _letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' - _letters_ipa = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻàãäåèéíîôõúûüăēĕĝğĩĭŏŝšũŭžǐǝǧʻˀ˥˦˧˨˩̝̞̠̥̪̃̆̊̍̚εابرسشصفلمهوᵐᵑᵝṣẽ​‍‎’⁠ⁿっゎッヮヶ�" + # _punctuation = ';:,.!?¡¿—…"«»“”#()-~[]|/' + # _letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' + # _letters_ipa = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻàãäåèéíîôõúûüăēĕĝğĩĭŏŝšũŭžǐǝǧʻˀ˥˦˧˨˩̝̞̠̥̪̃̆̊̍̚εابرسشصفلمهوᵐᵑᵝṣẽ​‍‎’⁠ⁿっゎッヮヶ�" PAD = '_' # fmt: on + _punctuation = ';:,.!?¡¿—…"«»“” ' + _letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' + _letters_ipa = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ" + PUNCT_LIST = [p for p in _punctuation] def __init__( From deb826727675c21a69897a3ec76511e45600e639 Mon Sep 17 00:00:00 2001 From: treacker Date: Sun, 15 May 2022 08:40:54 -0700 Subject: [PATCH 113/244] refactoring --- examples/tts/conf/vits.yaml | 14 +++++++------- nemo/collections/tts/models/vits.py | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index e62da06af1df..4b2139a29ae0 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -6,11 +6,11 @@ name: VITS -# train_dataset: "../datasets/ljspeech_ds/LJSpeech-1.1/train_manifest.json" -train_dataset: "raid/datasets/tts_data/train_manifest.json" +train_dataset: "../datasets/ljspeech_ds/LJSpeech-1.1/train_manifest.json" +# train_dataset: "raid/datasets/tts_data/train_manifest.json" -# validation_datasets: "../datasets/ljspeech_ds/LJSpeech-1.1/val_manifest.json" -validation_datasets: "raid/datasets/tts_data/val_manifest.json" +validation_datasets: "../datasets/ljspeech_ds/LJSpeech-1.1/val_manifest.json" +# validation_datasets: "raid/datasets/tts_data/val_manifest.json" sup_data_path: null sup_data_types: null @@ -126,7 +126,7 @@ model: pin_memory: false batch_sampler: - batch_size: 96 + batch_size: 64 boundaries: 
[32,300,400,500,600,700,800,900,1000] num_replicas: ${trainer.devices} shuffle: true @@ -211,7 +211,7 @@ trainer: check_val_every_n_epoch: 1 exp_manager: - exp_dir: ../exps/vits_bs96 + exp_dir: ../exps/vits_orig_g2p name: ${name} create_tensorboard_logger: false create_checkpoint_callback: true @@ -220,7 +220,7 @@ exp_manager: mode: min create_wandb_logger: true wandb_logger_kwargs: - name: vits_bs96 + name: vits_orig_g2p project: ${name} entity: nvidia resume_if_exists: false diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index a8eef40c9dd2..1a0d47899f4c 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -338,7 +338,7 @@ def validation_step(self, batch, batch_idx): ), ] - logger.log({"specs": specs, "audios": audios}) + logger.log({"specs": specs, "audios": audios}) def _loader(self, cfg): try: From 58b2f4eaaa354dbb938e31f3f99bad34abd6cab6 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Wed, 25 May 2022 03:34:10 -0700 Subject: [PATCH 114/244] added cosineLR --- nemo/collections/tts/models/vits.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 1a0d47899f4c..c14cb06b969f 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -43,6 +43,7 @@ spec_to_mel_torch, ) from nemo.core.classes.common import PretrainedModelInfo +from nemo.core.optim.lr_scheduler import CosineAnnealing from nemo.utils import logging, model_utils class VitsModel(TextToWaveform): @@ -164,13 +165,21 @@ def parse(self, str_input: str) -> torch.tensor: def configure_optimizers(self): optim_g = torch.optim.AdamW(self.net_g.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps) optim_d = torch.optim.AdamW(self.net_d.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps) + + max_steps=400000 + min_lr = 1e-5 + wu_ratio = 0.02 + + # scheduler_g = CosineAnnealing(optimizer=optim_d, max_steps=max_steps, min_lr=min_lr, warmup_steps=max_steps * wu_ratio,) + # scheduler_d = CosineAnnealing(optimizer=optim_d, max_steps=max_steps, min_lr=min_lr,) scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=self._cfg.lr_decay) + scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=self._cfg.lr_decay) scheduler_g_dict = { 'scheduler': scheduler_g, 'interval': 'step', } - scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=self._cfg.lr_decay) + scheduler_d_dict = {'scheduler': scheduler_d, 'interval': 'step'} return [optim_g, optim_d], [scheduler_g_dict, scheduler_d_dict] From 57b0c8b2b3be27c5f53ad59951a03caaaef31c5e Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Wed, 25 May 2022 09:28:59 -0700 Subject: [PATCH 115/244] Updated whitelist path --- examples/tts/conf/vits.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index 4b2139a29ae0..59b1fc17fdb1 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -14,13 +14,12 @@ validation_datasets: "../datasets/ljspeech_ds/LJSpeech-1.1/val_manifest.json" sup_data_path: null sup_data_types: null -# checkpoint_path: 'vits_full/VITS/2022-03-25_17-41-47/checkpoints/VITS--loss_gen_all\=37.8252-epoch\=7719-last.ckpt' # checkpoint_path: 'checkpoint' checkpoint_path: null phoneme_dict_path: "scripts/tts_dataset_files/cmudict-0.7b_nv22.01" heteronyms_path: 
"scripts/tts_dataset_files/heteronyms-030921" -whitelist_path: "nemo_text_processing/text_normalization/en/data/whitelist_lj_speech.tsv" +whitelist_path: "nemo_text_processing/text_normalization/en/data/whitelist/lj_speech.tsv" model: From f087eb724894652944db511ea15861bcdd8ba689 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Wed, 8 Jun 2022 06:44:39 -0700 Subject: [PATCH 116/244] added vanilla torch grad scaler --- examples/tts/conf/vits.yaml | 3 +++ examples/tts/vits.py | 10 +++++----- nemo/collections/tts/models/vits.py | 30 +++++++++++++++++++++-------- 3 files changed, 30 insertions(+), 13 deletions(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index 59b1fc17fdb1..f740254e0361 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -200,6 +200,9 @@ trainer: accelerator: gpu strategy: ddp precision: 16 + amp_backend: 'apex' + amp_level: 'O2' + benchmark: true max_epochs: 1000000 accumulate_grad_batches: 1 # gradient_clip_val: 1000.0 diff --git a/examples/tts/vits.py b/examples/tts/vits.py index 719f599a7445..e42ef664c839 100644 --- a/examples/tts/vits.py +++ b/examples/tts/vits.py @@ -24,12 +24,12 @@ @hydra_runner(config_path="conf", config_name="vits") def main(cfg): - plugins = [] - if cfg.trainer.precision in [16, 'bf16']: - scaler = GradScaler(enabled=True) - plugins.append(NativeMixedPrecisionPlugin(precision=cfg.trainer.precision, device='cuda', scaler=scaler)) + # plugins = [] + # if cfg.trainer.precision in [16, 'bf16']: + # scaler = GradScaler(enabled=True) + # plugins.append(NativeMixedPrecisionPlugin(precision=cfg.trainer.precision, device='cuda', scaler=scaler)) - trainer = pl.Trainer(resume_from_checkpoint=cfg.checkpoint_path, plugins=plugins, replace_sampler_ddp=False, **cfg.trainer) + trainer = pl.Trainer(resume_from_checkpoint=cfg.checkpoint_path, replace_sampler_ddp=False, **cfg.trainer) # trainer = pl.Trainer(plugins=plugins, **cfg.trainer) exp_manager(trainer, cfg.get("exp_manager", None)) model = VitsModel(cfg=cfg.model, trainer=trainer) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index c14cb06b969f..dff4d9002837 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -23,7 +23,7 @@ from omegaconf import DictConfig from pytorch_lightning import Trainer from pytorch_lightning.loggers import WandbLogger -from torch.cuda.amp import autocast +from torch.cuda.amp import autocast, GradScaler from torch.nn import functional as F from nemo.collections.tts.helpers.helpers import plot_spectrogram_to_numpy, DistributedBucketSampler @@ -68,6 +68,8 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): self.tokenizer_pad = self.tokenizer.pad self.tokenizer_unk = self.tokenizer.oov + self.scaler = GradScaler() + super().__init__(cfg=cfg, trainer=trainer) self.audio_to_melspec_precessor = instantiate(cfg.preprocessor, highfreq=cfg.train_ds.dataset.highfreq) @@ -243,8 +245,9 @@ def training_step(self, batch, batch_idx): y = torch.unsqueeze(y, 1) y = slice_segments(y, ids_slice * self.cfg.n_window_stride, self._cfg.segment_size) - - y_d_hat_r, y_d_hat_g, _, _ = self.net_d(y, y_hat.detach()) + + with autocast(enabled=True): + y_d_hat_r, y_d_hat_g, _, _ = self.net_d(y, y_hat.detach()) with autocast(enabled=False): loss_disc, losses_disc_r, losses_disc_g = self.disc_loss(disc_real_outputs=y_d_hat_r, disc_generated_outputs=y_d_hat_g) @@ -253,10 +256,16 @@ def training_step(self, batch, batch_idx): # train discriminator optim_d.zero_grad() 
- self.manual_backward(loss_disc_all) - # TODO: maybe change it to PTL-based function + self.manual_backward(self.scaler.scale(loss_disc_all)) + + # self.scaler.scale(loss_disc_all).backward() + self.scaler.unscale_(optim_d) norm_d = clip_grad_value_(self.net_d.parameters(), None) - optim_d.step() + self.scaler.step(optim_d) + self.scaler.update() + # TODO: maybe change it to PTL-based function + # norm_d = clip_grad_value_(self.net_d.parameters(), None) + # optim_d.step() with autocast(enabled=True): # Generator @@ -271,10 +280,15 @@ def training_step(self, batch, batch_idx): # train generator optim_g.zero_grad() - self.manual_backward(loss_gen_all) + self.manual_backward(self.scaler.scale(loss_gen_all)) # TODO: maybe change it to PTL-based function + # norm_g = clip_grad_value_(self.net_g.parameters(), None) + # optim_g.step() + # self.scaler.scale(loss_gen_all).backward() + self.scaler.unscale_(optim_g) norm_g = clip_grad_value_(self.net_g.parameters(), None) - optim_g.step() + self.scaler.step(optim_g) + self.scaler.update() schedulers = self.lr_schedulers() if schedulers is not None and self.trainer.is_last_batch: From e476eb0d7752cdffcc794e7df9a4bd74fa953564 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Wed, 8 Jun 2022 12:57:58 -0700 Subject: [PATCH 117/244] Fixed lightning version --- requirements/requirements_lightning.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/requirements_lightning.txt b/requirements/requirements_lightning.txt index 405320c26cbe..b29ef23ec915 100644 --- a/requirements/requirements_lightning.txt +++ b/requirements/requirements_lightning.txt @@ -1,4 +1,4 @@ -pytorch-lightning>=1.6.1 +pytorch-lightning==1.6.1 torchmetrics>=0.4.1rc0 transformers>=4.0.1 webdataset>=0.1.48,<=0.1.62 From af0c6794ee23efce40b8119d09a9d59b63e9d951 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Wed, 8 Jun 2022 12:58:28 -0700 Subject: [PATCH 118/244] added warmup and wd --- nemo/collections/tts/models/vits.py | 36 +++++++++++------------------ 1 file changed, 13 insertions(+), 23 deletions(-) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index dff4d9002837..d3cfb8d3d2c0 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -68,7 +68,7 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): self.tokenizer_pad = self.tokenizer.pad self.tokenizer_unk = self.tokenizer.oov - self.scaler = GradScaler() + # self.scaler = GradScaler() super().__init__(cfg=cfg, trainer=trainer) @@ -165,18 +165,18 @@ def parse(self, str_input: str) -> torch.tensor: pass def configure_optimizers(self): - optim_g = torch.optim.AdamW(self.net_g.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps) - optim_d = torch.optim.AdamW(self.net_d.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps) + optim_g = torch.optim.AdamW(self.net_g.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps, weight_decay=0.01) + optim_d = torch.optim.AdamW(self.net_d.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps, weight_decay=0.01) max_steps=400000 min_lr = 1e-5 wu_ratio = 0.02 - # scheduler_g = CosineAnnealing(optimizer=optim_d, max_steps=max_steps, min_lr=min_lr, warmup_steps=max_steps * wu_ratio,) - # scheduler_d = CosineAnnealing(optimizer=optim_d, max_steps=max_steps, min_lr=min_lr,) + scheduler_g = CosineAnnealing(optimizer=optim_d, max_steps=max_steps, min_lr=min_lr)#, warmup_steps=1000,) + scheduler_d = 
CosineAnnealing(optimizer=optim_d, max_steps=max_steps, min_lr=min_lr)#, warmup_steps=1000,) - scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=self._cfg.lr_decay) - scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=self._cfg.lr_decay) + # scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=self._cfg.lr_decay) + # scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=self._cfg.lr_decay) scheduler_g_dict = { 'scheduler': scheduler_g, 'interval': 'step', @@ -245,7 +245,6 @@ def training_step(self, batch, batch_idx): y = torch.unsqueeze(y, 1) y = slice_segments(y, ids_slice * self.cfg.n_window_stride, self._cfg.segment_size) - with autocast(enabled=True): y_d_hat_r, y_d_hat_g, _, _ = self.net_d(y, y_hat.detach()) with autocast(enabled=False): @@ -256,16 +255,11 @@ def training_step(self, batch, batch_idx): # train discriminator optim_d.zero_grad() - self.manual_backward(self.scaler.scale(loss_disc_all)) + self.manual_backward(loss_disc_all) - # self.scaler.scale(loss_disc_all).backward() - self.scaler.unscale_(optim_d) - norm_d = clip_grad_value_(self.net_d.parameters(), None) - self.scaler.step(optim_d) - self.scaler.update() # TODO: maybe change it to PTL-based function - # norm_d = clip_grad_value_(self.net_d.parameters(), None) - # optim_d.step() + norm_d = clip_grad_value_(self.net_d.parameters(), None) + optim_d.step() with autocast(enabled=True): # Generator @@ -280,15 +274,11 @@ def training_step(self, batch, batch_idx): # train generator optim_g.zero_grad() - self.manual_backward(self.scaler.scale(loss_gen_all)) + self.manual_backward(loss_gen_all) # TODO: maybe change it to PTL-based function - # norm_g = clip_grad_value_(self.net_g.parameters(), None) - # optim_g.step() - # self.scaler.scale(loss_gen_all).backward() - self.scaler.unscale_(optim_g) norm_g = clip_grad_value_(self.net_g.parameters(), None) - self.scaler.step(optim_g) - self.scaler.update() + optim_g.step() + schedulers = self.lr_schedulers() if schedulers is not None and self.trainer.is_last_batch: From 17b4d4ea0f1d6a20897afb0cbc973b5219900026 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Sun, 12 Jun 2022 03:53:15 -0700 Subject: [PATCH 119/244] switched to cosineLR --- nemo/collections/tts/models/vits.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index d3cfb8d3d2c0..46b316288107 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -172,8 +172,8 @@ def configure_optimizers(self): min_lr = 1e-5 wu_ratio = 0.02 - scheduler_g = CosineAnnealing(optimizer=optim_d, max_steps=max_steps, min_lr=min_lr)#, warmup_steps=1000,) - scheduler_d = CosineAnnealing(optimizer=optim_d, max_steps=max_steps, min_lr=min_lr)#, warmup_steps=1000,) + scheduler_g = CosineAnnealing(optimizer=optim_d, max_steps=max_steps, min_lr=min_lr, warmup_steps=1000,) + scheduler_d = CosineAnnealing(optimizer=optim_d, max_steps=max_steps, min_lr=min_lr, warmup_steps=1000,) # scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=self._cfg.lr_decay) # scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=self._cfg.lr_decay) @@ -256,8 +256,6 @@ def training_step(self, batch, batch_idx): # train discriminator optim_d.zero_grad() self.manual_backward(loss_disc_all) - - # TODO: maybe change it to PTL-based function norm_d = clip_grad_value_(self.net_d.parameters(), None) optim_d.step() @@ -275,13 +273,12 @@ def 
training_step(self, batch, batch_idx): # train generator optim_g.zero_grad() self.manual_backward(loss_gen_all) - # TODO: maybe change it to PTL-based function norm_g = clip_grad_value_(self.net_g.parameters(), None) optim_g.step() schedulers = self.lr_schedulers() - if schedulers is not None and self.trainer.is_last_batch: + if schedulers is not None:# and self.trainer.is_last_batch: sch1, sch2 = schedulers sch1.step() sch2.step() From 7510e718b3049b2073ccc3788bdbb2748c3114e0 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Mon, 20 Jun 2022 03:00:01 -0700 Subject: [PATCH 120/244] refactored data classes for vits --- .../tts/modules/monotonic_align/__init__.py | 57 +++++++++++++++++-- .../tts/modules/monotonic_align/setup.py | 7 ++- nemo/collections/tts/torch/data.py | 20 +++++-- nemo/collections/tts/torch/tts_tokenizers.py | 12 ++-- 4 files changed, 78 insertions(+), 18 deletions(-) diff --git a/nemo/collections/tts/modules/monotonic_align/__init__.py b/nemo/collections/tts/modules/monotonic_align/__init__.py index 4ab8442858bf..15be08d3e566 100644 --- a/nemo/collections/tts/modules/monotonic_align/__init__.py +++ b/nemo/collections/tts/modules/monotonic_align/__init__.py @@ -35,16 +35,16 @@ import numpy as np import torch +import numba -from .numba_core import maximum_path_c -# from .core import maximum_path_c +# from .numba_core import maximum_path_c def maximum_path(neg_cent, mask): """ Cython optimized version. - neg_cent: [b, t_t, t_s] - mask: [b, t_t, t_s] - """ + neg_cent: [b, t_t, t_s] + mask: [b, t_t, t_s] + """ device = neg_cent.device dtype = neg_cent.dtype neg_cent = neg_cent.data.cpu().numpy().astype(np.float32) @@ -54,3 +54,50 @@ def maximum_path(neg_cent, mask): t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32) maximum_path_c(path, neg_cent, t_t_max, t_s_max) return torch.from_numpy(path).to(device=device, dtype=dtype) + + +@numba.jit(nopython=True, boundscheck=False, parallel=True) +def maximum_path_each(path, value, t_y: int, t_x: int, max_neg_val=-1e9): + """ + Args: + path: int32[:, :] + value: float32[:, :] + t_y: int + t_x: int + max_neg_val: float + """ + index: int = t_x - 1 + + for y in range(t_y): + for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): + if x == y: + v_cur = max_neg_val + else: + v_cur = value[y - 1, x] + if x == 0: + if y == 0: + v_prev = 0.0 + else: + v_prev = max_neg_val + else: + v_prev = value[y - 1, x - 1] + value[y, x] += max(v_prev, v_cur) + + for y in range(t_y - 1, -1, -1): + path[y, index] = 1 + if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]): + index = index - 1 + + +@numba.jit(nopython=True, boundscheck=False, parallel=True) +def maximum_path_c(paths, values, t_ys, t_xs): + """ + Args: + paths: int32[:, :, :] + values: float32[:, :, :] + t_ys: int[:] + t_xs: int[:] + """ + b: int = paths.shape[0] + for i in numba.prange(b): + maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) \ No newline at end of file diff --git a/nemo/collections/tts/modules/monotonic_align/setup.py b/nemo/collections/tts/modules/monotonic_align/setup.py index 245556dcf7d6..76e09159c6cd 100644 --- a/nemo/collections/tts/modules/monotonic_align/setup.py +++ b/nemo/collections/tts/modules/monotonic_align/setup.py @@ -35,7 +35,10 @@ # SOFTWARE. 
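(Editorial aside: the numba rewrite above replaces the Cython extension, so `monotonic_align` no longer needs a compile step; that is why the cythonize call is commented out of setup.py below. A toy invocation of `maximum_path`, assuming numba is installed and the module imports as patched; the shapes follow the `[b, t_t, t_s]` convention from the docstring:

    # Toy monotonic alignment search over 6 target steps and 4 source steps.
    import torch
    from nemo.collections.tts.modules.monotonic_align import maximum_path

    b, t_t, t_s = 1, 6, 4
    neg_cent = torch.randn(b, t_t, t_s)   # alignment scores, [batch, t_t, t_s]
    mask = torch.ones(b, t_t, t_s)        # every (t_t, t_s) position is valid here

    path = maximum_path(neg_cent, mask)   # hard monotonic alignment, same shape
    assert path.shape == (b, t_t, t_s)
    # Backtracking assigns each target step to exactly one source step.
    assert torch.all(path.sum(-1) == 1)

The dynamic program fills `value[y, x]` with the best cumulative score of any monotonic path reaching that cell, then backtracks from the last column, which is why the score tensor is mutated in place.)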
from distutils.core import setup -from Cython.Build import cythonize +# from Cython.Build import cythonize import numpy -setup(name='monotonic_align', ext_modules=cythonize("core.pyx"), include_dirs=[numpy.get_include()]) +setup( + name='monotonic_align', + # ext_modules=cythonize("core.pyx"), + include_dirs=[numpy.get_include()]) diff --git a/nemo/collections/tts/torch/data.py b/nemo/collections/tts/torch/data.py index b7d156e22047..9664bdf79f38 100644 --- a/nemo/collections/tts/torch/data.py +++ b/nemo/collections/tts/torch/data.py @@ -49,7 +49,7 @@ TTSDataType, WithLens, ) -from nemo.collections.tts.torch.tts_tokenizers import BaseTokenizer, EnglishCharsTokenizer, EnglishPhonemesTokenizer +from nemo.collections.tts.torch.tts_tokenizers import BaseTokenizer, EnglishCharsTokenizer, EnglishPhonemesTokenizer, IPAPhonemesTokenizer from nemo.core.classes import Dataset from nemo.utils import logging @@ -77,6 +77,7 @@ def __init__( n_mels: int = 80, lowfreq: int = 0, highfreq: Optional[int] = None, + add_blank=True, **kwargs, ): """Dataset which can be used for training spectrogram generators and end-to-end TTS models. @@ -138,7 +139,7 @@ def __init__( self.text_tokenizer = text_tokenizer self.phoneme_probability = None - if isinstance(self.text_tokenizer, BaseTokenizer): + if isinstance(self.text_tokenizer, IPAPhonemesTokenizer): self.text_tokenizer_pad_id = text_tokenizer.pad self.tokens = text_tokenizer.tokens self.phoneme_probability = getattr(self.text_tokenizer, "phoneme_probability", None) @@ -216,6 +217,7 @@ def __init__( self.data = TTSDataset.filter_files(data, ignore_file, min_duration, max_duration, total_duration) self.base_data_dir = get_base_dir([item["audio_filepath"] for item in self.data]) + self.add_blank = add_blank # Initialize audio and mel related parameters self.sample_rate = sample_rate self.featurizer = WaveformFeaturizer(sample_rate=self.sample_rate) @@ -391,7 +393,10 @@ def get_log_mel(self, audio): mel = torch.matmul(self.fb.to(spec.dtype), spec) log_mel = torch.log(torch.clamp(mel, min=torch.finfo(mel.dtype).tiny)) return log_mel - + def intersperse(lst, item): + result = [item] * (len(lst) * 2 + 1) + result[1::2] = lst + return result def __getitem__(self, index): sample = self.data[index] audio_path_as_text_id = sample["audio_filepath"].replace("/", "-").split(".")[0] @@ -405,10 +410,15 @@ def __getitem__(self, index): audio, audio_length = features, torch.tensor(features.shape[0]).long() if "text_tokens" in sample: - text = torch.tensor(sample["text_tokens"]).long() - text_length = torch.tensor(len(sample["text_tokens"])).long() + text = sample["text_tokens"] + if self.add_blank: + text = intersperse(text, 0) + text = torch.tensor(text).long() + text_length = torch.tensor(len(text)).long() else: tokenized = self.text_tokenizer(sample["normalized_text"]) + if self.add_blank: + tokenized = intersperse(tokenized, 0) text = torch.tensor(tokenized).long() text_length = torch.tensor(len(tokenized)).long() diff --git a/nemo/collections/tts/torch/tts_tokenizers.py b/nemo/collections/tts/torch/tts_tokenizers.py index 26f4396107b2..1df5649db4e1 100644 --- a/nemo/collections/tts/torch/tts_tokenizers.py +++ b/nemo/collections/tts/torch/tts_tokenizers.py @@ -42,7 +42,7 @@ def __init__(self, tokens, *, pad=PAD, blank=BLANK, oov=OOV, sep='', add_blank_a super().__init__() tokens = list(tokens) - self.pad, tokens = len(tokens), tokens + [pad] # Padding + self.pad, tokens = 0, [pad] + tokens # Padding if add_blank_at is not None: self.blank, tokens = len(tokens), tokens + 
[blank] # Reserved for blank from asr-model @@ -422,16 +422,16 @@ def __init__( if hasattr(g2p, "phoneme_probability"): self.phoneme_probability = g2p.phoneme_probability tokens = [] + + if punct: + tokens.extend(self.PUNCT_LIST) + self.space, tokens = len(tokens), tokens + [space] # Space if silence is not None: self.silence, tokens = len(tokens), tokens + [silence] # Silence - - tokens.extend([l for l in self._letters_ipa]) tokens.extend([l for l in self._letters]) - - if punct: - tokens.extend(self.PUNCT_LIST) + tokens.extend([l for l in self._letters_ipa]) super().__init__(tokens, oov=oov, pad=self.PAD, sep=sep, add_blank_at=add_blank_at) From 8d0f725e35a21b91c15dd6ae4b34ef32ee561694 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Mon, 20 Jun 2022 03:03:34 -0700 Subject: [PATCH 121/244] some fixes --- examples/tts/conf/vits.yaml | 4 +++- nemo/collections/tts/modules/vits_modules.py | 10 ++++++++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index f740254e0361..e26a8f663d89 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -79,7 +79,7 @@ model: stresses: true chars: true apostrophe: true - pad_with_space: true + pad_with_space: false g2p: _target_: nemo.collections.tts.torch.g2ps.IPAG2p strip: true @@ -115,6 +115,7 @@ model: trim: False pitch_fmin: ${model.pitch_fmin} pitch_fmax: ${model.pitch_fmax} + add_blank: true dataloader_params: @@ -151,6 +152,7 @@ model: trim: False pitch_fmin: ${model.pitch_fmin} pitch_fmax: ${model.pitch_fmax} + add_blank: true dataloader_params: drop_last: false diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index 30275cc49ed6..3012c7407e72 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -478,6 +478,8 @@ def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): x = self.convs(x, x_mask) x = self.proj(x) * x_mask + # torch.manual_seed(1) + # torch.cuda.manual_seed(1) if not reverse: flows = self.flows assert w is not None @@ -659,6 +661,8 @@ def forward(self, x, x_lengths, g=None): x = self.pre(x) * x_mask x = self.enc(x, x_mask, g=g) stats = self.proj(x) * x_mask + # torch.manual_seed(1) + # torch.cuda.manual_seed(1) m, logs = torch.split(stats, self.out_channels, dim=1) z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask return z, m, logs, x_mask @@ -914,8 +918,8 @@ def forward(self, x, x_lengths, y, y_lengths, sid=None): l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging # expand prior - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) + m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] + logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] z_slice, ids_slice = rand_slice_segments(z, y_lengths, self.segment_size) o = self.dec(z_slice, g=g) @@ -1060,6 +1064,8 @@ def rand_slice_segments(x, x_lengths=None, segment_size=4): x_lengths = t ids_str_max = x_lengths - segment_size + 1 ids_str_max = ids_str_max.to(device=x.device) + # torch.manual_seed(1) + # torch.cuda.manual_seed(1) ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) ret = slice_segments(x, ids_str, segment_size) return ret, ids_str From 
aadcb32939d592af04ecd407bf6ae6153268eef8 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Mon, 20 Jun 2022 13:39:43 -0700 Subject: [PATCH 122/244] fixed import --- nemo/collections/tts/torch/data.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nemo/collections/tts/torch/data.py b/nemo/collections/tts/torch/data.py index 9664bdf79f38..8c0416855561 100644 --- a/nemo/collections/tts/torch/data.py +++ b/nemo/collections/tts/torch/data.py @@ -22,6 +22,7 @@ from typing import Callable, Dict, List, Optional, Union import librosa +from nemo.collections.tts.modules.vits_modules import intersperse import numpy as np import torch from nemo_text_processing.text_normalization.normalize import Normalizer From 8bfe37033bbd4c98737bf64d64d8ed2b1e562122 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Mon, 20 Jun 2022 13:40:07 -0700 Subject: [PATCH 123/244] changeg train loop --- examples/tts/conf/vits.yaml | 10 ++--- nemo/collections/tts/models/vits.py | 61 ++++++++++++++--------------- 2 files changed, 35 insertions(+), 36 deletions(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index e26a8f663d89..407ea554a24f 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -126,7 +126,7 @@ model: pin_memory: false batch_sampler: - batch_size: 64 + batch_size: 32 boundaries: [32,300,400,500,600,700,800,900,1000] num_replicas: ${trainer.devices} shuffle: true @@ -201,10 +201,10 @@ trainer: devices: 2 accelerator: gpu strategy: ddp - precision: 16 - amp_backend: 'apex' - amp_level: 'O2' - benchmark: true + precision: 32 + # amp_backend: 'apex' + # amp_level: 'O2' + # benchmark: true max_epochs: 1000000 accumulate_grad_batches: 1 # gradient_clip_val: 1000.0 diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 46b316288107..2fa092b80761 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -166,7 +166,7 @@ def parse(self, str_input: str) -> torch.tensor: def configure_optimizers(self): optim_g = torch.optim.AdamW(self.net_g.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps, weight_decay=0.01) - optim_d = torch.optim.AdamW(self.net_d.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps, weight_decay=0.01) + optim_d = torch.optim.AdamW(self.net_d.parameters(), self._cfg.lr / 2, betas=self._cfg.betas, eps=self._cfg.eps, weight_decay=0.01) max_steps=400000 min_lr = 1e-5 @@ -210,26 +210,25 @@ def training_step(self, batch, batch_idx): # get optimizers optim_g, optim_d = self.optimizers() - # TODO: support accum gradient or don't allow to use accum gradient in init (y, y_lengths, x, x_lengths) = batch spec = self.get_spec(y) spec_lengths = self.audio_to_melspec_precessor.get_seq_len(y_lengths) - with autocast(enabled=True): - y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = self.net_g( - x, x_lengths, spec, spec_lengths - ) - - mel = spec_to_mel_torch( - spec, - self._cfg.n_window_size, - self._cfg.n_mel_channels, - self._cfg.sample_rate, - self._cfg.mel_fmin, - self._cfg.mel_fmax, - ) - y_mel = slice_segments(mel, ids_slice, self._cfg.segment_size // self.cfg.n_window_stride) + # with autocast(enabled=True): + y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = self.net_g( + x, x_lengths, spec, spec_lengths + ) + + mel = spec_to_mel_torch( + spec, + self._cfg.n_window_size, + self._cfg.n_mel_channels, + self._cfg.sample_rate, + self._cfg.mel_fmin, + self._cfg.mel_fmax, + ) + 
y_mel = slice_segments(mel, ids_slice, self._cfg.segment_size // self.cfg.n_window_stride) y_hat = y_hat.float() y_hat_mel = audio_to_mel_torch( @@ -245,12 +244,12 @@ def training_step(self, batch, batch_idx): y = torch.unsqueeze(y, 1) y = slice_segments(y, ids_slice * self.cfg.n_window_stride, self._cfg.segment_size) - with autocast(enabled=True): - y_d_hat_r, y_d_hat_g, _, _ = self.net_d(y, y_hat.detach()) - with autocast(enabled=False): - loss_disc, losses_disc_r, losses_disc_g = self.disc_loss(disc_real_outputs=y_d_hat_r, - disc_generated_outputs=y_d_hat_g) - loss_disc_all = loss_disc + # with autocast(enabled=True): + y_d_hat_r, y_d_hat_g, _, _ = self.net_d(y, y_hat.detach()) + # with autocast(enabled=False): + loss_disc, losses_disc_r, losses_disc_g = self.disc_loss(disc_real_outputs=y_d_hat_r, + disc_generated_outputs=y_d_hat_g) + loss_disc_all = loss_disc # train discriminator @@ -259,16 +258,16 @@ def training_step(self, batch, batch_idx): norm_d = clip_grad_value_(self.net_d.parameters(), None) optim_d.step() - with autocast(enabled=True): + # with autocast(enabled=True): # Generator - y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = self.net_d(y, y_hat) - with autocast(enabled=False): - loss_dur = torch.sum(l_length.float()) - loss_mel = F.l1_loss(y_mel, y_hat_mel) * self._cfg.c_mel - loss_kl = self.kl_loss(z_p=z_p, logs_q=logs_q, m_p=m_p, logs_p=logs_p, z_mask=z_mask) * self._cfg.c_kl - loss_fm = self.feat_matching_loss(fmap_r=fmap_r, fmap_g=fmap_g) - loss_gen, losses_gen = self.gen_loss(disc_outputs=y_d_hat_g) - loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl + y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = self.net_d(y, y_hat) + # with autocast(enabled=False): + loss_dur = torch.sum(l_length.float()) + loss_mel = F.l1_loss(y_mel, y_hat_mel) * self._cfg.c_mel + loss_kl = self.kl_loss(z_p=z_p, logs_q=logs_q, m_p=m_p, logs_p=logs_p, z_mask=z_mask) * self._cfg.c_kl + loss_fm = self.feat_matching_loss(fmap_r=fmap_r, fmap_g=fmap_g) + loss_gen, losses_gen = self.gen_loss(disc_outputs=y_d_hat_g) + loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl # train generator optim_g.zero_grad() From 3be013c94bd4ab078eec8ce5a458b4794e1e217d Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Wed, 22 Jun 2022 07:52:05 -0700 Subject: [PATCH 124/244] fixed scheduler bug --- nemo/collections/tts/models/vits.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 2fa092b80761..e2c3c1beb4bb 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -166,7 +166,7 @@ def parse(self, str_input: str) -> torch.tensor: def configure_optimizers(self): optim_g = torch.optim.AdamW(self.net_g.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps, weight_decay=0.01) - optim_d = torch.optim.AdamW(self.net_d.parameters(), self._cfg.lr / 2, betas=self._cfg.betas, eps=self._cfg.eps, weight_decay=0.01) + optim_d = torch.optim.AdamW(self.net_d.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps, weight_decay=0.01) max_steps=400000 min_lr = 1e-5 From fd03723409e27959e66416cd663f3128a06c7ccf Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Tue, 12 Jul 2022 05:53:07 -0700 Subject: [PATCH 125/244] refactoring for exps --- examples/tts/conf/vits.yaml | 3 ++- examples/tts/vits.py | 2 +- nemo/collections/tts/models/vits.py | 36 +++++++++++++++++++---------- 3 files changed, 27 insertions(+), 14 deletions(-) diff --git 
a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index 407ea554a24f..a39f2f845150 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -32,7 +32,7 @@ model: n_window_stride: 256 n_fft: 1024 lowfreq: 0 - highfreq: 8000 + highfreq: null window: hann splice_length: 64 @@ -227,5 +227,6 @@ exp_manager: name: vits_orig_g2p project: ${name} entity: nvidia + resume: "allow" resume_if_exists: false resume_ignore_no_checkpoint: false diff --git a/examples/tts/vits.py b/examples/tts/vits.py index e42ef664c839..e83da88b0184 100644 --- a/examples/tts/vits.py +++ b/examples/tts/vits.py @@ -29,7 +29,7 @@ def main(cfg): # scaler = GradScaler(enabled=True) # plugins.append(NativeMixedPrecisionPlugin(precision=cfg.trainer.precision, device='cuda', scaler=scaler)) - trainer = pl.Trainer(resume_from_checkpoint=cfg.checkpoint_path, replace_sampler_ddp=False, **cfg.trainer) + trainer = pl.Trainer(replace_sampler_ddp=False, **cfg.trainer) # trainer = pl.Trainer(plugins=plugins, **cfg.trainer) exp_manager(trainer, cfg.get("exp_manager", None)) model = VitsModel(cfg=cfg.model, trainer=trainer) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index e2c3c1beb4bb..be55ae36126a 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -166,14 +166,15 @@ def parse(self, str_input: str) -> torch.tensor: def configure_optimizers(self): optim_g = torch.optim.AdamW(self.net_g.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps, weight_decay=0.01) - optim_d = torch.optim.AdamW(self.net_d.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps, weight_decay=0.01) + optim_d = torch.optim.AdamW(self.net_d.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps, weight_decay=0.1) - max_steps=400000 + max_steps=800000 min_lr = 1e-5 wu_ratio = 0.02 + wu_steps = 16000 - scheduler_g = CosineAnnealing(optimizer=optim_d, max_steps=max_steps, min_lr=min_lr, warmup_steps=1000,) - scheduler_d = CosineAnnealing(optimizer=optim_d, max_steps=max_steps, min_lr=min_lr, warmup_steps=1000,) + scheduler_g = CosineAnnealing(optimizer=optim_g, max_steps=max_steps, min_lr=min_lr, warmup_steps=wu_steps,) + scheduler_d = CosineAnnealing(optimizer=optim_d, max_steps=max_steps, min_lr=min_lr)#, warmup_steps=1000,) # scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=self._cfg.lr_decay) # scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=self._cfg.lr_decay) @@ -251,13 +252,13 @@ def training_step(self, batch, batch_idx): disc_generated_outputs=y_d_hat_g) loss_disc_all = loss_disc - + # if self.global_step <= 180000: # train discriminator - optim_d.zero_grad() - self.manual_backward(loss_disc_all) - norm_d = clip_grad_value_(self.net_d.parameters(), None) - optim_d.step() - + # optim_d.zero_grad() + # self.manual_backward(loss_disc_all) + # norm_d = clip_grad_value_(self.net_d.parameters(), None) + # optim_d.step() + # with autocast(enabled=True): # Generator y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = self.net_d(y, y_hat) @@ -269,12 +270,23 @@ def training_step(self, batch, batch_idx): loss_gen, losses_gen = self.gen_loss(disc_outputs=y_d_hat_g) loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl + # if loss_gen > loss_disc: + # loss_gen_all = loss_fm + loss_mel + loss_dur + loss_kl + loss_gen + # else: + # loss_gen_all = loss_fm + loss_mel + loss_dur + loss_kl + # train generator optim_g.zero_grad() self.manual_backward(loss_gen_all) norm_g = 
clip_grad_value_(self.net_g.parameters(), None) optim_g.step() - + + norm_d = None + # if loss_disc > loss_gen: + # optim_d.zero_grad() + # self.manual_backward(loss_disc_all) + # norm_d = clip_grad_value_(self.net_d.parameters(), None) + # optim_d.step() schedulers = self.lr_schedulers() if schedulers is not None:# and self.trainer.is_last_batch: @@ -291,7 +303,7 @@ def training_step(self, batch, batch_idx): "loss_gen_all": loss_gen_all, "loss_disc_all": loss_disc_all, "grad_gen": norm_g, - "grad_disc": norm_d, + # "grad_disc": norm_d, } for i, v in enumerate(losses_gen): From 5d43cc3762a452c185de6111933bacaeafd54a6f Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Tue, 26 Jul 2022 07:53:49 -0700 Subject: [PATCH 126/244] Refactored loss logic --- nemo/collections/tts/losses/vits_losses.py | 11 ++++++---- nemo/collections/tts/models/vits.py | 24 ++++++++-------------- 2 files changed, 16 insertions(+), 19 deletions(-) diff --git a/nemo/collections/tts/losses/vits_losses.py b/nemo/collections/tts/losses/vits_losses.py index 3439ba1ff0a0..b5a465ce5898 100644 --- a/nemo/collections/tts/losses/vits_losses.py +++ b/nemo/collections/tts/losses/vits_losses.py @@ -122,14 +122,16 @@ def input_types(self): @property def output_types(self): return { - "loss": NeuralType(elements_type=LossType()), + "real_loss": NeuralType(elements_type=LossType()), + "gen_loss": NeuralType(elements_type=LossType()), "real_losses": [NeuralType(elements_type=LossType())], "fake_losses": [NeuralType(elements_type=LossType())], } @typecheck() def forward(self, disc_real_outputs, disc_generated_outputs): - loss = 0 + gen_loss = 0 + real_loss = 0 r_losses = [] g_losses = [] for dr, dg in zip(disc_real_outputs, disc_generated_outputs): @@ -137,11 +139,12 @@ def forward(self, disc_real_outputs, disc_generated_outputs): dg = dg.float() r_loss = torch.mean((1 - dr) ** 2) g_loss = torch.mean(dg ** 2) - loss += r_loss + g_loss + real_loss += r_loss + gen_loss += g_loss r_losses.append(r_loss.item()) g_losses.append(g_loss.item()) - return loss, r_losses, g_losses + return real_loss, gen_loss, r_losses, g_losses class GeneratorLoss(Loss): diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index be55ae36126a..10b87b6a9a04 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -166,7 +166,7 @@ def parse(self, str_input: str) -> torch.tensor: def configure_optimizers(self): optim_g = torch.optim.AdamW(self.net_g.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps, weight_decay=0.01) - optim_d = torch.optim.AdamW(self.net_d.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps, weight_decay=0.1) + optim_d = torch.optim.AdamW(self.net_d.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps, weight_decay=0.01) max_steps=800000 min_lr = 1e-5 @@ -248,16 +248,17 @@ def training_step(self, batch, batch_idx): # with autocast(enabled=True): y_d_hat_r, y_d_hat_g, _, _ = self.net_d(y, y_hat.detach()) # with autocast(enabled=False): - loss_disc, losses_disc_r, losses_disc_g = self.disc_loss(disc_real_outputs=y_d_hat_r, + loss_disc_real, loss_disc_gen, losses_disc_r, losses_disc_g = self.disc_loss(disc_real_outputs=y_d_hat_r, disc_generated_outputs=y_d_hat_g) - loss_disc_all = loss_disc + loss_disc_all = torch.max(loss_disc_real, loss_disc_gen) + # loss_disc_all = loss_disc_real + loss_disc_gen # if self.global_step <= 180000: # train discriminator - # optim_d.zero_grad() - # self.manual_backward(loss_disc_all) - # 
norm_d = clip_grad_value_(self.net_d.parameters(), None) - # optim_d.step() + optim_d.zero_grad() + self.manual_backward(loss_disc_all) + norm_d = clip_grad_value_(self.net_d.parameters(), None) + optim_d.step() # with autocast(enabled=True): # Generator @@ -280,13 +281,6 @@ def training_step(self, batch, batch_idx): self.manual_backward(loss_gen_all) norm_g = clip_grad_value_(self.net_g.parameters(), None) optim_g.step() - - norm_d = None - # if loss_disc > loss_gen: - # optim_d.zero_grad() - # self.manual_backward(loss_disc_all) - # norm_d = clip_grad_value_(self.net_d.parameters(), None) - # optim_d.step() schedulers = self.lr_schedulers() if schedulers is not None:# and self.trainer.is_last_batch: @@ -303,7 +297,7 @@ def training_step(self, batch, batch_idx): "loss_gen_all": loss_gen_all, "loss_disc_all": loss_disc_all, "grad_gen": norm_g, - # "grad_disc": norm_d, + "grad_disc": norm_d, } for i, v in enumerate(losses_gen): From 97ac08646e9ebc078aa2539d8a957a12a4de93be Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Tue, 2 Aug 2022 04:43:06 -0700 Subject: [PATCH 127/244] Ref for exps --- nemo/collections/tts/losses/vits_losses.py | 10 ++++++---- nemo/collections/tts/models/vits.py | 7 ++++--- nemo/collections/tts/modules/vits_modules.py | 2 ++ 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/nemo/collections/tts/losses/vits_losses.py b/nemo/collections/tts/losses/vits_losses.py index b5a465ce5898..a6fb16baeba3 100644 --- a/nemo/collections/tts/losses/vits_losses.py +++ b/nemo/collections/tts/losses/vits_losses.py @@ -123,7 +123,7 @@ def input_types(self): def output_types(self): return { "real_loss": NeuralType(elements_type=LossType()), - "gen_loss": NeuralType(elements_type=LossType()), + # "gen_loss": NeuralType(elements_type=LossType()), "real_losses": [NeuralType(elements_type=LossType())], "fake_losses": [NeuralType(elements_type=LossType())], } @@ -134,17 +134,19 @@ def forward(self, disc_real_outputs, disc_generated_outputs): real_loss = 0 r_losses = [] g_losses = [] + loss = 0 for dr, dg in zip(disc_real_outputs, disc_generated_outputs): dr = dr.float() dg = dg.float() r_loss = torch.mean((1 - dr) ** 2) g_loss = torch.mean(dg ** 2) - real_loss += r_loss - gen_loss += g_loss + # real_loss += r_loss + # gen_loss += g_loss + loss += torch.max(r_loss, g_loss) r_losses.append(r_loss.item()) g_losses.append(g_loss.item()) - return real_loss, gen_loss, r_losses, g_losses + return loss, r_losses, g_losses class GeneratorLoss(Loss): diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 10b87b6a9a04..2e6a56bf5559 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -248,10 +248,11 @@ def training_step(self, batch, batch_idx): # with autocast(enabled=True): y_d_hat_r, y_d_hat_g, _, _ = self.net_d(y, y_hat.detach()) # with autocast(enabled=False): - loss_disc_real, loss_disc_gen, losses_disc_r, losses_disc_g = self.disc_loss(disc_real_outputs=y_d_hat_r, + # loss_disc_real, loss_disc_gen, losses_disc_r, losses_disc_g = self.disc_loss(disc_real_outputs=y_d_hat_r, + loss_disc, losses_disc_r, losses_disc_g = self.disc_loss(disc_real_outputs=y_d_hat_r, disc_generated_outputs=y_d_hat_g) - loss_disc_all = torch.max(loss_disc_real, loss_disc_gen) - # loss_disc_all = loss_disc_real + loss_disc_gen + # loss_disc_all = torch.max(loss_disc_real, loss_disc_gen) + loss_disc_all = loss_disc # if self.global_step <= 180000: # train discriminator diff --git a/nemo/collections/tts/modules/vits_modules.py 
b/nemo/collections/tts/modules/vits_modules.py index 3012c7407e72..dbf18d2f7dd5 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -736,6 +736,7 @@ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), ]) + self.dropout = nn.Dropout(0.3) self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) def forward(self, x): @@ -772,6 +773,7 @@ def __init__(self, use_spectral_norm=False): norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), ]) + self.dropout = nn.Dropout(0.3) self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) def forward(self, x): From 16eeacb68a99550da731e532742af00a617b5b1d Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Wed, 3 Aug 2022 06:19:06 -0700 Subject: [PATCH 128/244] added coqui stuff --- .../tts/losses/vits_coqui_losses.py | 131 ++ nemo/collections/tts/losses/vits_losses.py | 4 +- nemo/collections/tts/models/vits.py | 4 +- nemo/collections/tts/models/vits_coqui.py | 507 +++++ .../tts/modules/vits_coqui_modules.py | 1671 +++++++++++++++++ 5 files changed, 2315 insertions(+), 2 deletions(-) create mode 100644 nemo/collections/tts/losses/vits_coqui_losses.py create mode 100644 nemo/collections/tts/models/vits_coqui.py create mode 100644 nemo/collections/tts/modules/vits_coqui_modules.py diff --git a/nemo/collections/tts/losses/vits_coqui_losses.py b/nemo/collections/tts/losses/vits_coqui_losses.py new file mode 100644 index 000000000000..ce21566757ce --- /dev/null +++ b/nemo/collections/tts/losses/vits_coqui_losses.py @@ -0,0 +1,131 @@ +import torch +import torch.nn as nn + +class VitsGeneratorLoss(nn.Module): + def __init__(self): + super().__init__() + self.kl_loss_alpha = 45 + self.gen_loss_alpha = 1 + self.feat_loss_alpha = 1 + self.dur_loss_alpha = 1 + self.mel_loss_alpha = 1 + + @staticmethod + def feature_loss(feats_real, feats_generated): + loss = 0 + for dr, dg in zip(feats_real, feats_generated): + for rl, gl in zip(dr, dg): + rl = rl.float().detach() + gl = gl.float() + loss += torch.mean(torch.abs(rl - gl)) + return loss * 2 + + @staticmethod + def generator_loss(scores_fake): + loss = 0 + gen_losses = [] + for dg in scores_fake: + dg = dg.float() + l = torch.mean((1 - dg) ** 2) + gen_losses.append(l) + loss += l + + return loss, gen_losses + + @staticmethod + def kl_loss(z_p, logs_q, m_p, logs_p, z_mask): + """ + z_p, logs_q: [b, h, t_t] + m_p, logs_p: [b, h, t_t] + """ + z_p = z_p.float() + logs_q = logs_q.float() + m_p = m_p.float() + logs_p = logs_p.float() + z_mask = z_mask.float() + + kl = logs_p - logs_q - 0.5 + kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p) + kl = torch.sum(kl * z_mask) + l = kl / torch.sum(z_mask) + return l + + + def forward( + self, + mel_slice, + mel_slice_hat, + z_p, + logs_q, + m_p, + logs_p, + z_mask, + scores_disc_fake, + feats_disc_fake, + feats_disc_real, + loss_duration, + ): + """ + Shapes: + - mel_slice : :math:`[B, 1, T]` + - mel_slice_hat: :math:`[B, 1, T]` + - z_p: :math:`[B, C, T]` + - logs_q: :math:`[B, C, T]` + - m_p: :math:`[B, C, T]` + - logs_p: :math:`[B, C, T]` + - z_len: :math:`[B]` + - scores_disc_fake[i]: :math:`[B, C]` + - feats_disc_fake[i][j]: :math:`[B, C, T', P]` + - feats_disc_real[i][j]: :math:`[B, C, T', P]` + """ + loss = 0.0 + return_dict = {} + # 
compute losses + loss_kl = ( + self.kl_loss(z_p=z_p, logs_q=logs_q, m_p=m_p, logs_p=logs_p, z_mask=z_mask.unsqueeze(1)) + * self.kl_loss_alpha + ) + loss_feat = ( + self.feature_loss(feats_real=feats_disc_real, feats_generated=feats_disc_fake) * self.feat_loss_alpha + ) + loss_gen = self.generator_loss(scores_fake=scores_disc_fake)[0] * self.gen_loss_alpha + loss_mel = torch.nn.functional.l1_loss(mel_slice, mel_slice_hat) * self.mel_loss_alpha + loss_duration = torch.sum(loss_duration.float()) * self.dur_loss_alpha + loss = loss_kl + loss_feat + loss_mel + loss_gen + loss_duration + + # pass losses to the dict + return_dict["loss_gen"] = loss_gen + return_dict["loss_kl"] = loss_kl + return_dict["loss_feat"] = loss_feat + return_dict["loss_mel"] = loss_mel + return_dict["loss_duration"] = loss_duration + return_dict["loss"] = loss + return return_dict + + +class VitsDiscriminatorLoss(nn.Module): + def __init__(self): + super().__init__() + + @staticmethod + def discriminator_loss(scores_real, scores_fake): + loss = 0 + real_losses = [] + fake_losses = [] + for dr, dg in zip(scores_real, scores_fake): + dr = dr.float() + dg = dg.float() + real_loss = torch.mean((1 - dr) ** 2) + fake_loss = torch.mean(dg**2) + loss += real_loss + fake_loss + real_losses.append(real_loss.item()) + fake_losses.append(fake_loss.item()) + return loss, real_losses, fake_losses + + def forward(self, scores_disc_real, scores_disc_fake): + loss = 0.0 + loss_disc, loss_disc_real, loss_disc_fake = self.discriminator_loss( + scores_real=scores_disc_real, scores_fake=scores_disc_fake + ) + + return loss_disc, loss_disc_real, loss_disc_fake \ No newline at end of file diff --git a/nemo/collections/tts/losses/vits_losses.py b/nemo/collections/tts/losses/vits_losses.py index a6fb16baeba3..a77c2a03ebed 100644 --- a/nemo/collections/tts/losses/vits_losses.py +++ b/nemo/collections/tts/losses/vits_losses.py @@ -38,6 +38,7 @@ # KlLoss import torch +from torch.autograd import Variable from nemo.core.classes import Loss, typecheck from nemo.core.neural_types.elements import LossType, VoidType @@ -142,7 +143,8 @@ def forward(self, disc_real_outputs, disc_generated_outputs): g_loss = torch.mean(dg ** 2) # real_loss += r_loss # gen_loss += g_loss - loss += torch.max(r_loss, g_loss) + # loss += torch.max(r_losпшеs, g_loss) + loss += r_loss + g_loss r_losses.append(r_loss.item()) g_losses.append(g_loss.item()) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 2e6a56bf5559..18b1dde0e587 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -246,16 +246,18 @@ def training_step(self, batch, batch_idx): y = torch.unsqueeze(y, 1) y = slice_segments(y, ids_slice * self.cfg.n_window_stride, self._cfg.segment_size) # with autocast(enabled=True): + print(y.requires_grad, y_hat.detach().requires_grad) y_d_hat_r, y_d_hat_g, _, _ = self.net_d(y, y_hat.detach()) + print(y_d_hat_r[0].requires_grad) # with autocast(enabled=False): # loss_disc_real, loss_disc_gen, losses_disc_r, losses_disc_g = self.disc_loss(disc_real_outputs=y_d_hat_r, loss_disc, losses_disc_r, losses_disc_g = self.disc_loss(disc_real_outputs=y_d_hat_r, disc_generated_outputs=y_d_hat_g) # loss_disc_all = torch.max(loss_disc_real, loss_disc_gen) loss_disc_all = loss_disc - # if self.global_step <= 180000: # train discriminator + print(loss_disc_all.requires_grad) optim_d.zero_grad() self.manual_backward(loss_disc_all) norm_d = clip_grad_value_(self.net_d.parameters(), None) diff --git 
a/nemo/collections/tts/models/vits_coqui.py b/nemo/collections/tts/models/vits_coqui.py new file mode 100644 index 000000000000..18e5921990e1 --- /dev/null +++ b/nemo/collections/tts/models/vits_coqui.py @@ -0,0 +1,507 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from nemo.core import typecheck + +# typecheck.set_typecheck_enabled(False) + +import omegaconf +import torch +import wandb +from hydra.utils import instantiate +from omegaconf import DictConfig +from pytorch_lightning import Trainer +from pytorch_lightning.loggers import WandbLogger +from torch.cuda.amp import autocast, GradScaler +from torch.nn import functional as F + +from nemo.collections.tts.helpers.helpers import plot_spectrogram_to_numpy, DistributedBucketSampler +from nemo.collections.tts.losses.vits_losses import ( + KlLoss, + FeatureMatchingLoss, + DiscriminatorLoss, + GeneratorLoss +) + +from nemo.collections.tts.losses.vits_coqui_losses import ( + VitsDiscriminatorLoss, + VitsGeneratorLoss, +) + +from nemo.collections.tts.models.base import TextToWaveform +from nemo.collections.tts.modules.vits_coqui_modules import ( + MultiPeriodDiscriminator, + SynthesizerTrn, + audio_to_mel_torch, + clip_grad_value_, + slice_segments, + spec_to_mel_torch, +) +from nemo.core.classes.common import PretrainedModelInfo +from nemo.core.optim.lr_scheduler import CosineAnnealing +from nemo.utils import logging, model_utils + +class VitsModel(TextToWaveform): + def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): + # Convert to Hydra 1.0 compatible DictConfig + + cfg = model_utils.convert_model_config_to_dict_config(cfg) + cfg = model_utils.maybe_update_config_version(cfg) + + # setup normalizer + self.normalizer = None + self.text_normalizer_call = None + self.text_normalizer_call_kwargs = {} + self._setup_normalizer(cfg) + + # setup tokenizer + self.tokenizer = None + self._setup_tokenizer(cfg) + assert self.tokenizer is not None + + num_tokens = len(self.tokenizer.tokens) + self.tokenizer_pad = self.tokenizer.pad + self.tokenizer_unk = self.tokenizer.oov + + # self.scaler = GradScaler() + + super().__init__(cfg=cfg, trainer=trainer) + + self.audio_to_melspec_precessor = instantiate(cfg.preprocessor, highfreq=cfg.train_ds.dataset.highfreq) + + self.feat_matching_loss = FeatureMatchingLoss() + self.disc_loss = DiscriminatorLoss() + self.gen_loss = GeneratorLoss() + self.kl_loss = KlLoss() + + self.log_train_images = False + self.logged_real_samples = False + self._tb_logger = None + self.hann_window = None + self.sample_rate = cfg.sample_rate + self.hop_size = cfg.n_window_stride + self.n_fft = cfg.train_ds.dataset.n_fft + self.win_length = cfg.train_ds.dataset.win_length + + # TODO: need to add SynthesizerTrn in config + self.net_g = SynthesizerTrn( + n_vocab=num_tokens, + spec_channels=cfg.train_ds.dataset.n_fft // 2 + 1, + segment_size=cfg.segment_size // cfg.train_ds.dataset.hop_length, + inter_channels=cfg.inter_channels, + 
hidden_channels=cfg.hidden_channels, + filter_channels=cfg.filter_channels, + n_heads=cfg.n_heads, + n_layers=cfg.n_layers, + kernel_size=cfg.pitch_embedding_kernel_size, + p_dropout=cfg.p_dropout, + padding_idx=self.tokenizer_pad, + resblock=cfg.generator.resblock, + resblock_kernel_sizes=cfg.generator.resblock_kernel_sizes, + resblock_dilation_sizes=cfg.generator.resblock_dilation_sizes, + upsample_rates=cfg.generator.upsample_rates, + upsample_initial_channel=cfg.generator.upsample_initial_channel, + upsample_kernel_sizes=cfg.generator.upsample_kernel_sizes, + ) + self.net_d = MultiPeriodDiscriminator(cfg.use_spectral_norm) + self.automatic_optimization = True + + window_fn = { + 'hann': torch.hann_window, + 'hamming': torch.hamming_window, + 'blackman': torch.blackman_window, + 'bartlett': torch.bartlett_window, + 'none': None, + }.get(self.hann_window, None) + + self.stft = lambda x: torch.stft( + input=x, + n_fft=self.n_fft, + hop_length=self.hop_size, + win_length=self.win_length, + window=window_fn(self.win_length, periodic=False).to(torch.float) if window_fn else None, + ) + + def _setup_normalizer(self, cfg): + if "text_normalizer" in cfg: + normalizer_kwargs = {} + + if "whitelist" in cfg.text_normalizer: + normalizer_kwargs["whitelist"] = self.register_artifact( + 'text_normalizer.whitelist', cfg.text_normalizer.whitelist + ) + + self.normalizer = instantiate(cfg.text_normalizer, **normalizer_kwargs) + self.text_normalizer_call = self.normalizer.normalize + if "text_normalizer_call_kwargs" in cfg: + self.text_normalizer_call_kwargs = cfg.text_normalizer_call_kwargs + + def _setup_tokenizer(self, cfg): + text_tokenizer_kwargs = {} + if "g2p" in cfg.text_tokenizer and cfg.text_tokenizer.g2p is not None: + g2p_kwargs = {} + + if "phoneme_dict" in cfg.text_tokenizer.g2p: + g2p_kwargs["phoneme_dict"] = self.register_artifact( + 'text_tokenizer.g2p.phoneme_dict', cfg.text_tokenizer.g2p.phoneme_dict, + ) + + if "heteronyms" in cfg.text_tokenizer.g2p: + g2p_kwargs["heteronyms"] = self.register_artifact( + 'text_tokenizer.g2p.heteronyms', cfg.text_tokenizer.g2p.heteronyms, + ) + + text_tokenizer_kwargs["g2p"] = instantiate(cfg.text_tokenizer.g2p, **g2p_kwargs) + + self.tokenizer = instantiate(cfg.text_tokenizer, **text_tokenizer_kwargs) + + def parse(self, str_input: str) -> torch.tensor: + # TODO: Implement + pass + + def configure_optimizers(self): + optim_g = torch.optim.AdamW(self.net_g.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps, weight_decay=0.01) + optim_d = torch.optim.AdamW(self.net_d.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps, weight_decay=0.01) + + scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=self._cfg.lr_decay) + scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=self._cfg.lr_decay) + scheduler_g_dict = { + 'scheduler': scheduler_g, + 'interval': 'step', + } + + scheduler_d_dict = {'scheduler': scheduler_d, 'interval': 'step'} + return [optim_g, optim_d], [scheduler_g_dict, scheduler_d_dict] + + # only for inference + def forward(self, batch, batch_idx, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): + with torch.no_grad(): + (y, y_lengths, x, x_lengths) = batch + # remove else + x = x[:1] + x_lengths = x_lengths[:1] + + y_hat, attn, mask, (z, z_p, m_p, logs_p) = self.net_g.infer(x, x_lengths, sid=sid, noise_scale=noise_scale, + length_scale=length_scale, noise_scale_w=noise_scale_w, max_len=1000) + y_hat_lengths = mask.sum([1, 2]).long() * 
self._cfg.n_window_stride + return y_hat, y_hat_lengths, (z, z_p, m_p, logs_p) + + def get_spec(self, audio): + with torch.cuda.amp.autocast(enabled=False): + spec = self.stft(audio) + if spec.dtype in [torch.cfloat, torch.cdouble]: + spec = torch.view_as_real(spec) + spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-9) + return spec + + def _freeze_layers(self): + if self.args.freeze_encoder: + for param in self.text_encoder.parameters(): + param.requires_grad = False + + if self.args.freeze_PE: + for param in self.posterior_encoder.parameters(): + param.requires_grad = False + + if self.args.freeze_DP: + for param in self.duration_predictor.parameters(): + param.requires_grad = False + + if self.args.freeze_flow_decoder: + for param in self.flow.parameters(): + param.requires_grad = False + + if self.args.freeze_waveform_decoder: + for param in self.waveform_decoder.parameters(): + param.requires_grad = False + + def training_step(self, batch, batch_idx, optimizer_idx: int): + """Perform a single training step. Run the model forward pass and compute losses. + Args: + batch (Dict): Input tensors. + batch_idx (int): Index of the current batch. + optimizer_idx (int): Index of optimizer to use. 0 for the discriminator and 1 for the generator network. + Returns: + Tuple[Dict, Dict]: Model outputs and computed losses. + """ + (waveform, y_lengths, tokens, token_lengths) = batch + + spec = self.get_spec(waveform) + spec_lens = self.audio_to_melspec_precessor.get_seq_len(y_lengths) + + # self._freeze_layers() + + # Discriminator + if optimizer_idx == 0: + # generator pass + outputs = self.net_g( + tokens, + token_lengths, + spec, + spec_lens, + ) + + # y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = outputs + # cache tensors for the generator pass + self.model_outputs_cache = outputs # pylint: disable=attribute-defined-outside-init + + y = torch.unsqueeze(waveform, 1) + y = slice_segments(y, self.model_outputs_cache["slice_ids"] * self.cfg.n_window_stride, self._cfg.segment_size) + # compute scores and features + + print(y.requires_grad, outputs["model_outputs"].detach().requires_grad) + y_d_hat_r, y_d_hat_g, _, _ = self.net_d( + y, outputs["model_outputs"].detach() + ) + print(y_d_hat_r[0].requires_grad) + # compute loss + with autocast(enabled=False): # use float32 for the criterion + loss_disc, losses_disc_r, losses_disc_g = self.disc_loss(disc_real_outputs=y_d_hat_r, + disc_generated_outputs=y_d_hat_g) + loss_disc_all = loss_disc + loss_dict = { + "loss": loss_disc_all, + } + + for i, v in enumerate(losses_disc_r): + loss_dict[f"loss_disc_r_{i}"] = v + + for i, v in enumerate(losses_disc_g): + loss_dict[f"loss_disc_g_{i}"] = v + + self.log_dict(loss_dict, on_step=True, sync_dist=True) + print(loss_disc_all.requires_grad) + return loss_disc_all + + # Generator + if optimizer_idx == 1: + mel = spec_to_mel_torch( + spec, + self._cfg.n_window_size, + self._cfg.n_mel_channels, + self._cfg.sample_rate, + self._cfg.mel_fmin, + self._cfg.mel_fmax, + ) + # y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = self.model_outputs_cache + # compute melspec segment + with autocast(enabled=False): + mel_slice = slice_segments(mel, self.model_outputs_cache["slice_ids"], self._cfg.segment_size // self.cfg.n_window_stride) + mel_slice_hat = audio_to_mel_torch( + self.model_outputs_cache["model_outputs"].float().squeeze(1), + self._cfg.n_window_size, + self._cfg.n_mel_channels, + self._cfg.sample_rate, + self.cfg.n_window_stride, +
self._cfg.preprocessor.n_window_size, + self._cfg.mel_fmin, + self._cfg.mel_fmax, + ) + y = torch.unsqueeze(waveform, 1) + y = slice_segments(y, self.model_outputs_cache["slice_ids"] * self.cfg.n_window_stride, self._cfg.segment_size) + # compute discriminator scores and features + y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = self.net_d( + y, self.model_outputs_cache["model_outputs"] + ) + + # compute losses + with autocast(enabled=False): # use float32 for the criterion + loss_dur = torch.sum(self.model_outputs_cache["loss_duration"].float()) + loss_mel = F.l1_loss(mel_slice, mel_slice_hat) * self._cfg.c_mel + loss_kl = self.kl_loss(z_p=self.model_outputs_cache["z_p"], + logs_q=self.model_outputs_cache["logs_q"], + m_p=self.model_outputs_cache["m_p"], + logs_p=self.model_outputs_cache["logs_p"], + z_mask=self.model_outputs_cache["z_mask"]) * self._cfg.c_kl + loss_fm = self.feat_matching_loss(fmap_r=fmap_r, fmap_g=fmap_g) + loss_gen, losses_gen = self.gen_loss(disc_outputs=y_d_hat_g) + loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl + + loss_dict = { + "loss": loss_gen_all, + "loss_gen": loss_gen, + "loss_fm": loss_fm, + "loss_mel * c_mel": loss_mel, + "loss_dur": loss_dur, + "loss_kl * c_kl": loss_kl, + } + + for i, v in enumerate(losses_gen): + loss_dict[f"loss_gen_i_{i}"] = v + + self.log_dict(loss_dict, on_step=True, sync_dist=True) + + return loss_gen_all + + raise ValueError(" [!] Unexpected `optimizer_idx`.") + + def _log(self, batch, outputs, name_prefix="train"): # pylint: disable=unused-argument,no-self-use + y_hat, l_length, attn, ids_slice, x_mask, z_mask, _ = outputs + (y, y_lengths, x, x_lengths) = batch + y_hat = y_hat.squeeze() + y_hat_lengths = z_mask.sum([1, 2]).long() * self._cfg.train_ds.dataset.hop_length + mel, mel_lengths = self.audio_to_melspec_precessor(y, y_lengths) + y_hat_mel, y_hat_mel_lengths = self.audio_to_melspec_precessor(y_hat, y_hat_lengths) + logger = self.logger.experiment + # print(logger, self.logger) + if logger is not None and isinstance(self.logger, WandbLogger): + specs = [] + audios = [] + + specs += [ + wandb.Image( + plot_spectrogram_to_numpy(mel[0, :, : mel_lengths[0]].cpu().numpy()), caption=name_prefix + "_mel_target", + ), + wandb.Image( + plot_spectrogram_to_numpy(y_hat_mel[0, :, : y_hat_mel_lengths[0]].cpu().numpy()), + caption=name_prefix +"_mel_predicted", + ), + ] + + audios += [ + wandb.Audio( + y[0, : y_lengths[0]].data.cpu().to(torch.float).numpy(), + caption=name_prefix +"_wav_target", + sample_rate=self.sample_rate, + ), + wandb.Audio( + y_hat[0, : y_hat_lengths[0]].data.cpu().to(torch.float).numpy(), + caption=name_prefix +"_wav_predicted", + sample_rate=self.sample_rate, + ), + ] + + logger.log({"specs": specs, "audios": audios}) + + # def train_log( + # self, batch, outputs, logger, assets: dict, steps: int + # ): # pylint: disable=no-self-use + # """Create visualizations and waveform examples. + # For example, here you can plot spectrograms and generate sample waveforms from these spectrograms to + # be projected onto Tensorboard. + # Args: + # ap (AudioProcessor): audio processor used at training. + # batch (Dict): Model inputs used at the previous training step. + # outputs (Dict): Model outputs generated at the previous training step. + # Returns: + # Tuple[Dict, np.ndarray]: training plots and output waveform.
+ # """ + # self._log(batch, outputs, "train") + + def eval_step(self, batch: dict, criterion, optimizer_idx: int): + return self.train_step(batch, criterion, optimizer_idx) + + def validation_step(self, batch, batch_idx): + (y, y_lengths, x, x_lengths) = batch + + # TODO: fix hardcode + y_hat, attn, mask, *_ = self.net_g.infer(x, x_lengths, max_len=1000) + y_hat = y_hat.squeeze() + y_hat_lengths = mask.sum([1, 2]).long() * self._cfg.train_ds.dataset.hop_length + + mel, mel_lengths = self.audio_to_melspec_precessor(y, y_lengths) + y_hat_mel, y_hat_mel_lengths = self.audio_to_melspec_precessor(y_hat, y_hat_lengths) + + # plot audio once per epoch + if batch_idx == 0: + logger = self.logger.experiment + # print(logger, self.logger) + if logger is not None and isinstance(self.logger, WandbLogger): + specs = [] + audios = [] + + specs += [ + wandb.Image( + plot_spectrogram_to_numpy(mel[0, :, : mel_lengths[0]].cpu().numpy()), caption=f"val_mel_target", + ), + wandb.Image( + plot_spectrogram_to_numpy(y_hat_mel[0, :, : y_hat_mel_lengths[0]].cpu().numpy()), + caption=f"val_mel_predicted", + ), + ] + + audios += [ + wandb.Audio( + y[0, : y_lengths[0]].data.cpu().to(torch.float).numpy(), + caption=f"val_wav_target", + sample_rate=self.sample_rate, + ), + wandb.Audio( + y_hat[0, : y_hat_lengths[0]].data.cpu().to(torch.float).numpy(), + caption=f"val_wav_predicted", + sample_rate=self.sample_rate, + ), + ] + + logger.log({"specs": specs, "audios": audios}) + + # def eval_log(self, batch: dict, outputs: dict, logger, assets: dict, steps: int) -> None: + # self._log(batch, outputs, "eval") + + def _loader(self, cfg): + try: + # _ = cfg.model.train_ds.manifest_filepath + _ = cfg['dataset']['manifest_filepath'] + except omegaconf.errors.MissingMandatoryValue: + logging.warning("manifest_filepath was skipped. No dataset for this model.") + return None + + dataset = instantiate( + cfg.dataset, + text_normalizer=self.normalizer, + text_normalizer_call_kwargs=self.text_normalizer_call_kwargs, + text_tokenizer=self.tokenizer, + ) + return torch.utils.data.DataLoader( # noqa + dataset=dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params, + ) + + def train_dataloader(self): + # default used by the Trainer + dataset = instantiate( + self.cfg.train_ds.dataset, + text_normalizer=self.normalizer, + text_normalizer_call_kwargs=self.text_normalizer_call_kwargs, + text_tokenizer=self.tokenizer, + ) + + train_sampler = DistributedBucketSampler( + dataset, + self.cfg.train_ds.batch_sampler.batch_size, + [32,300,400,500,600,700,800,900,1000], + shuffle=True) + dataloader = torch.utils.data.DataLoader(dataset, collate_fn=dataset.collate_fn, batch_sampler=train_sampler, + **self.cfg.train_ds.dataloader_params,) + print('made ddp loader') + return dataloader + + def setup_training_data(self, cfg): + self._train_dl = self._loader(cfg) + + def setup_validation_data(self, cfg): + self._validation_dl = self._loader(cfg) + + def setup_test_data(self, cfg): + """Omitted.""" + pass + + @classmethod + def list_available_models(cls) -> 'List[PretrainedModelInfo]': + list_of_models = [] + # TODO: List available models?? 
+ return list_of_models + + def convert_text_to_waveform(self, *, tokens): + # TODO: Convert text to waveforms + pass diff --git a/nemo/collections/tts/modules/vits_coqui_modules.py b/nemo/collections/tts/modules/vits_coqui_modules.py new file mode 100644 index 000000000000..533115ce912f --- /dev/null +++ b/nemo/collections/tts/modules/vits_coqui_modules.py @@ -0,0 +1,1671 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# MIT License +# +# Copyright (c) 2021 Jaehyeon Kim +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +import math + +import numpy as np +import torch +from torch import nn +from torch.nn import Conv1d, ConvTranspose1d, Conv2d +from torch.nn import functional as F +from librosa.filters import mel as librosa_mel_fn +from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm + +from nemo.collections.tts.modules.monotonic_align import maximum_path + +# TODO: need to do LARGE refactoring + + +LRELU_SLOPE = 0.1 + + +def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): + return torch.log(torch.clamp(x, min=clip_val) * C) + + +def spectral_normalize_torch(magnitudes): + output = dynamic_range_compression_torch(magnitudes) + return output + + +class LayerNorm(nn.Module): + def __init__(self, channels, eps=1e-5): + super().__init__() + self.channels = channels + self.eps = eps + + self.gamma = nn.Parameter(torch.ones(channels)) + self.beta = nn.Parameter(torch.zeros(channels)) + + def forward(self, x): + x = x.transpose(1, -1) + x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) + return x.transpose(1, -1) + + +class ConvReluNorm(nn.Module): + def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): + super().__init__() + self.in_channels = in_channels + self.hidden_channels = hidden_channels + self.out_channels = out_channels + self.kernel_size = kernel_size + self.n_layers = n_layers + self.p_dropout = p_dropout + assert n_layers > 1, "Number of layers should be larger than 1." + + self.conv_layers = nn.ModuleList() + self.norm_layers = nn.ModuleList() + self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2)) + self.norm_layers.append(LayerNorm(hidden_channels)) + self.relu_drop = nn.Sequential( + nn.ReLU(), + nn.Dropout(p_dropout)) + for _ in range(n_layers - 1): + self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2)) + self.norm_layers.append(LayerNorm(hidden_channels)) + self.proj = nn.Conv1d(hidden_channels, out_channels, 1) + self.proj.weight.data.zero_() + self.proj.bias.data.zero_() + + def forward(self, x, x_mask): + x_org = x + for i in range(self.n_layers): + x = self.conv_layers[i](x * x_mask) + x = self.norm_layers[i](x) + x = self.relu_drop(x) + x = x_org + self.proj(x) + return x * x_mask + + +class DDSConv(nn.Module): + """ + Dilated and Depth-Separable Convolution + """ + + def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): + super().__init__() + self.channels = channels + self.kernel_size = kernel_size + self.n_layers = n_layers + self.p_dropout = p_dropout + + self.drop = nn.Dropout(p_dropout) + self.convs_sep = nn.ModuleList() + self.convs_1x1 = nn.ModuleList() + self.norms_1 = nn.ModuleList() + self.norms_2 = nn.ModuleList() + for i in range(n_layers): + dilation = kernel_size ** i + padding = (kernel_size * dilation - dilation) // 2 + self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, + groups=channels, dilation=dilation, padding=padding + )) + self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) + self.norms_1.append(LayerNorm(channels)) + self.norms_2.append(LayerNorm(channels)) + + def forward(self, x, x_mask, g=None): + if g is not None: + x = x + g + for i in range(self.n_layers): + y = self.convs_sep[i](x * x_mask) + y = self.norms_1[i](y) + y = F.gelu(y) + y = self.convs_1x1[i](y) + y = self.norms_2[i](y) + y = F.gelu(y) + y = self.drop(y) + x = x + y + return x * x_mask + + +class WN(torch.nn.Module): + def __init__(self, hidden_channels, kernel_size,
dilation_rate, n_layers, gin_channels=0, p_dropout=0): + super(WN, self).__init__() + assert (kernel_size % 2 == 1) + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.gin_channels = gin_channels + self.p_dropout = p_dropout + + self.in_layers = torch.nn.ModuleList() + self.res_skip_layers = torch.nn.ModuleList() + self.drop = nn.Dropout(p_dropout) + + if gin_channels != 0: + cond_layer = torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1) + self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') + + for i in range(n_layers): + dilation = dilation_rate ** i + padding = int((kernel_size * dilation - dilation) / 2) + in_layer = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, kernel_size, + dilation=dilation, padding=padding) + in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') + self.in_layers.append(in_layer) + + # last one is not necessary + if i < n_layers - 1: + res_skip_channels = 2 * hidden_channels + else: + res_skip_channels = hidden_channels + + res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) + res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') + self.res_skip_layers.append(res_skip_layer) + + def forward(self, x, x_mask, g=None, **kwargs): + output = torch.zeros_like(x) + n_channels_tensor = torch.IntTensor([self.hidden_channels]) + + if g is not None: + g = self.cond_layer(g) + + for i in range(self.n_layers): + x_in = self.in_layers[i](x) + if g is not None: + cond_offset = i * 2 * self.hidden_channels + g_l = g[:, cond_offset:cond_offset + 2 * self.hidden_channels, :] + else: + g_l = torch.zeros_like(x_in) + + acts = fused_add_tanh_sigmoid_multiply( + x_in, + g_l, + n_channels_tensor) + acts = self.drop(acts) + + res_skip_acts = self.res_skip_layers[i](acts) + if i < self.n_layers - 1: + res_acts = res_skip_acts[:, :self.hidden_channels, :] + x = (x + res_acts) * x_mask + output = output + res_skip_acts[:, self.hidden_channels:, :] + else: + output = output + res_skip_acts + return output * x_mask + + def remove_weight_norm(self): + if self.gin_channels != 0: + torch.nn.utils.remove_weight_norm(self.cond_layer) + for l in self.in_layers: + torch.nn.utils.remove_weight_norm(l) + for l in self.res_skip_layers: + torch.nn.utils.remove_weight_norm(l) + +# TODO: reuse from hifigan if it is possible?
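For reference, the activation inside WN above is the WaveNet-style gated unit computed by fused_add_tanh_sigmoid_multiply (defined further down in this file): the in_layer doubles the channel count, and the two halves act as filter and gate. A minimal standalone sketch, not part of the patch, with hypothetical shapes:

import torch

hidden = 192                                  # hypothetical hidden_channels
x_in = torch.randn(2, 2 * hidden, 50)         # [batch, 2*hidden, time] from an in_layer
g_l = torch.zeros_like(x_in)                  # conditioning slice (zeros when no speaker embedding)
in_act = x_in + g_l
t_act = torch.tanh(in_act[:, :hidden, :])     # filter half
s_act = torch.sigmoid(in_act[:, hidden:, :])  # gate half
acts = t_act * s_act                          # gated output, [batch, hidden, time]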
+class ResBlock1(torch.nn.Module): + def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): + super(ResBlock1, self).__init__() + self.convs1 = nn.ModuleList([ + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], + padding=get_padding(kernel_size, dilation[2]))) + ]) + self.convs1.apply(init_weights) + + self.convs2 = nn.ModuleList([ + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, + padding=get_padding(kernel_size, 1))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, + padding=get_padding(kernel_size, 1))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, + padding=get_padding(kernel_size, 1))) + ]) + self.convs2.apply(init_weights) + + def forward(self, x, x_mask=None): + for c1, c2 in zip(self.convs1, self.convs2): + xt = F.leaky_relu(x, LRELU_SLOPE) + if x_mask is not None: + xt = xt * x_mask + xt = c1(xt) + xt = F.leaky_relu(xt, LRELU_SLOPE) + if x_mask is not None: + xt = xt * x_mask + xt = c2(xt) + x = xt + x + if x_mask is not None: + x = x * x_mask + return x + + def remove_weight_norm(self): + for l in self.convs1: + remove_weight_norm(l) + for l in self.convs2: + remove_weight_norm(l) + +# TODO: reuse from hifigan if it is possible? +class ResBlock2(torch.nn.Module): + def __init__(self, channels, kernel_size=3, dilation=(1, 3)): + super(ResBlock2, self).__init__() + self.convs = nn.ModuleList([ + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]))), + weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]))) + ]) + self.convs.apply(init_weights) + + def forward(self, x, x_mask=None): + for c in self.convs: + xt = F.leaky_relu(x, LRELU_SLOPE) + if x_mask is not None: + xt = xt * x_mask + xt = c(xt) + x = xt + x + if x_mask is not None: + x = x * x_mask + return x + + def remove_weight_norm(self): + for l in self.convs: + remove_weight_norm(l) + + +class Log(nn.Module): + def forward(self, x, x_mask, reverse=False, **kwargs): + if not reverse: + y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask + logdet = torch.sum(-y, [1, 2]) + return y, logdet + else: + x = torch.exp(x) * x_mask + return x + + +class Flip(nn.Module): + def forward(self, x, *args, reverse=False, **kwargs): + x = torch.flip(x, [1]) + if not reverse: + logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) + return x, logdet + else: + return x + + +class ElementwiseAffine(nn.Module): + def __init__(self, channels): + super().__init__() + self.channels = channels + self.m = nn.Parameter(torch.zeros(channels, 1)) + self.logs = nn.Parameter(torch.zeros(channels, 1)) + + def forward(self, x, x_mask, reverse=False, **kwargs): + if not reverse: + y = self.m + torch.exp(self.logs) * x + y = y * x_mask + logdet = torch.sum(self.logs * x_mask, [1, 2]) + return y, logdet + else: + x = (x - self.m) * torch.exp(-self.logs) * x_mask + return x + + +class ResidualCouplingLayer(nn.Module): + def __init__(self, + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + p_dropout=0, + gin_channels=0, + mean_only=False): + assert channels % 2 == 0, "channels should be divisible by 2" + super().__init__() + 
self.channels = channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.half_channels = channels // 2 + self.mean_only = mean_only + + self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) + self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, + gin_channels=gin_channels) + self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) + self.post.weight.data.zero_() + self.post.bias.data.zero_() + + def forward(self, x, x_mask, g=None, reverse=False): + x0, x1 = torch.split(x, [self.half_channels] * 2, 1) + h = self.pre(x0) * x_mask + h = self.enc(h, x_mask, g=g) + stats = self.post(h) * x_mask + if not self.mean_only: + m, logs = torch.split(stats, [self.half_channels] * 2, 1) + else: + m = stats + logs = torch.zeros_like(m) + + if not reverse: + x1 = m + x1 * torch.exp(logs) * x_mask + x = torch.cat([x0, x1], 1) + logdet = torch.sum(logs, [1, 2]) + return x, logdet + else: + x1 = (x1 - m) * torch.exp(-logs) * x_mask + x = torch.cat([x0, x1], 1) + return x + + +class ConvFlow(nn.Module): + def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): + super().__init__() + self.in_channels = in_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.n_layers = n_layers + self.num_bins = num_bins + self.tail_bound = tail_bound + self.half_channels = in_channels // 2 + + self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) + self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) + self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) + self.proj.weight.data.zero_() + self.proj.bias.data.zero_() + + def forward(self, x, x_mask, g=None, reverse=False): + x0, x1 = torch.split(x, [self.half_channels] * 2, 1) + h = self.pre(x0) + h = self.convs(h, x_mask, g=g) + h = self.proj(h) * x_mask + + b, c, t = x0.shape + h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] + + unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) + unnormalized_heights = h[..., self.num_bins:2 * self.num_bins] / math.sqrt(self.filter_channels) + unnormalized_derivatives = h[..., 2 * self.num_bins:] + + x1, logabsdet = piecewise_rational_quadratic_transform(x1, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=reverse, + tails='linear', + tail_bound=self.tail_bound + ) + + x = torch.cat([x0, x1], 1) * x_mask + logdet = torch.sum(logabsdet * x_mask, [1, 2]) + if not reverse: + return x, logdet + else: + return x + + +class StochasticDurationPredictor(nn.Module): + def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): + super().__init__() + filter_channels = in_channels # it needs to be removed from future version. 
+ self.in_channels = in_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.n_flows = n_flows + self.gin_channels = gin_channels + + self.log_flow = Log() + self.flows = nn.ModuleList() + self.flows.append(ElementwiseAffine(2)) + for i in range(n_flows): + self.flows.append(ConvFlow(2, filter_channels, kernel_size, n_layers=3)) + self.flows.append(Flip()) + + self.post_pre = nn.Conv1d(1, filter_channels, 1) + self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) + self.post_convs = DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) + self.post_flows = nn.ModuleList() + self.post_flows.append(ElementwiseAffine(2)) + for i in range(4): + self.post_flows.append(ConvFlow(2, filter_channels, kernel_size, n_layers=3)) + self.post_flows.append(Flip()) + + self.pre = nn.Conv1d(in_channels, filter_channels, 1) + self.proj = nn.Conv1d(filter_channels, filter_channels, 1) + self.convs = DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, filter_channels, 1) + + def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): + x = torch.detach(x) + x = self.pre(x) + if g is not None: + g = torch.detach(g) + x = x + self.cond(g) + x = self.convs(x, x_mask) + x = self.proj(x) * x_mask + + # torch.manual_seed(1) + # torch.cuda.manual_seed(1) + if not reverse: + flows = self.flows + assert w is not None + + logdet_tot_q = 0 + h_w = self.post_pre(w) + h_w = self.post_convs(h_w, x_mask) + h_w = self.post_proj(h_w) * x_mask + e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask + z_q = e_q + for flow in self.post_flows: + z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) + logdet_tot_q += logdet_q + z_u, z1 = torch.split(z_q, [1, 1], 1) + u = torch.sigmoid(z_u) * x_mask + z0 = (w - u) * x_mask + logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2]) + logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q + + logdet_tot = 0 + z0, logdet = self.log_flow(z0, x_mask) + logdet_tot += logdet + z = torch.cat([z0, z1], 1) + for flow in flows: + z, logdet = flow(z, x_mask, g=x, reverse=reverse) + logdet_tot = logdet_tot + logdet + nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot + return nll + logq # [b] + else: + flows = list(reversed(self.flows)) + flows = flows[:-2] + [flows[-1]] # remove a useless vflow + z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale + for flow in flows: + z = flow(z, x_mask, g=x, reverse=reverse) + z0, z1 = torch.split(z, [1, 1], 1) + logw = z0 + return logw + + +class DurationPredictor(nn.Module): + def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): + super().__init__() + + self.in_channels = in_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.gin_channels = gin_channels + + self.drop = nn.Dropout(p_dropout) + self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) + self.norm_1 = LayerNorm(filter_channels) + self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) + self.norm_2 = LayerNorm(filter_channels) + self.proj = nn.Conv1d(filter_channels, 1, 1) + + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, in_channels, 1) + + def forward(self, x, 
x_mask, g=None): + x = torch.detach(x) + if g is not None: + g = torch.detach(g) + x = x + self.cond(g) + x = self.conv_1(x * x_mask) + x = torch.relu(x) + x = self.norm_1(x) + x = self.drop(x) + x = self.conv_2(x * x_mask) + x = torch.relu(x) + x = self.norm_2(x) + x = self.drop(x) + x = self.proj(x * x_mask) + return x * x_mask + + +class TextEncoder(nn.Module): + def __init__(self, + n_vocab, + out_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + padding_idx): + super().__init__() + self.n_vocab = n_vocab + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + + self.emb = nn.Embedding(n_vocab, hidden_channels, padding_idx=padding_idx) + nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) + + self.encoder = Encoder( + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout) + self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, x, x_lengths): + x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] + x = torch.transpose(x, 1, -1) # [b, h, t] + x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) + + x = self.encoder(x * x_mask, x_mask) + stats = self.proj(x) * x_mask + + m, logs = torch.split(stats, self.out_channels, dim=1) + return x, m, logs, x_mask + + +class ResidualCouplingBlock(nn.Module): + def __init__(self, + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + n_flows=4, + gin_channels=0): + super().__init__() + self.channels = channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.n_flows = n_flows + self.gin_channels = gin_channels + + self.flows = nn.ModuleList() + for i in range(n_flows): + self.flows.append(ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) + self.flows.append(Flip()) + + def forward(self, x, x_mask, g=None, reverse=False): + if not reverse: + for flow in self.flows: + x, _ = flow(x, x_mask, g=g, reverse=reverse) + else: + for flow in reversed(self.flows): + x = flow(x, x_mask, g=g, reverse=reverse) + return x + + +class PosteriorEncoder(nn.Module): + def __init__(self, + in_channels, + out_channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=0): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.hidden_channels = hidden_channels + self.kernel_size = kernel_size + self.dilation_rate = dilation_rate + self.n_layers = n_layers + self.gin_channels = gin_channels + + self.pre = nn.Conv1d(in_channels, hidden_channels, 1) + self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) + + def forward(self, x, x_lengths, g=None): + x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype).to(device=x.device) + x = self.pre(x) * x_mask + x = self.enc(x, x_mask, g=g) + stats = self.proj(x) * x_mask + # torch.manual_seed(1) + # torch.cuda.manual_seed(1) + m, logs = torch.split(stats, self.out_channels, dim=1) + z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask + return z, m, logs, x_mask + +# TODO: reuse from hifigan if it is possible? 
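The last line of PosteriorEncoder.forward above is the standard reparameterization trick: sampling z ~ N(m, exp(logs)^2) as m + eps * exp(logs) keeps the sample differentiable with respect to the predicted mean and log-std. A minimal sketch, not part of the patch, with hypothetical shapes:

import torch

m = torch.zeros(2, 192, 100, requires_grad=True)     # predicted mean
logs = torch.zeros(2, 192, 100, requires_grad=True)  # predicted log std
eps = torch.randn_like(m)                            # noise carries no parameters
z = m + eps * torch.exp(logs)                        # differentiable sample
z.sum().backward()                                   # gradients reach m and logs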
+class Generator(torch.nn.Module): + def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): + super(Generator, self).__init__() + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) + resblock = ResBlock1 if resblock == '1' else ResBlock2 + + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + self.ups.append(weight_norm( + ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), + k, u, padding=(k-u)//2))) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = upsample_initial_channel//(2**(i+1)) + for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): + self.resblocks.append(resblock(ch, k, d)) + + self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) + self.ups.apply(init_weights) + + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) + + def forward(self, x, g=None): + x = self.conv_pre(x) + if g is not None: + x = x + self.cond(g) + + for i in range(self.num_upsamples): + x = F.leaky_relu(x, LRELU_SLOPE) + x = self.ups[i](x) + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i*self.num_kernels+j](x) + else: + xs += self.resblocks[i*self.num_kernels+j](x) + x = xs / self.num_kernels + x = F.leaky_relu(x) + x = self.conv_post(x) + x = torch.tanh(x) + + return x + + def remove_weight_norm(self): + print('Removing weight norm...') + for l in self.ups: + remove_weight_norm(l) + for l in self.resblocks: + l.remove_weight_norm() + +# TODO: reuse from hifigan if it is possible? +class DiscriminatorP(torch.nn.Module): + def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): + super(DiscriminatorP, self).__init__() + self.period = period + self.use_spectral_norm = use_spectral_norm + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList([ + norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), + ]) + self.dropout = nn.Dropout(0.3) + self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) + + def forward(self, x): + fmap = [] + + # 1d to 2d + b, c, t = x.shape + if t % self.period != 0: # pad first + n_pad = self.period - (t % self.period) + x = F.pad(x, (0, n_pad), "reflect") + t = t + n_pad + x = x.view(b, c, t // self.period, self.period) + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + +# TODO: reuse from hifigan if it is possible? 
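DiscriminatorP above scores the waveform one period at a time: it right-pads the signal to a multiple of its period and reshapes it into a 2D map so the Conv2d stack sees one period per column. A standalone restatement of that reshape, not part of the patch, with hypothetical sizes:

import torch
import torch.nn.functional as F

period = 3
x = torch.randn(1, 1, 100)                 # [batch, channels, time]
b, c, t = x.shape
if t % period != 0:                        # pad so time is divisible by the period
    n_pad = period - (t % period)
    x = F.pad(x, (0, n_pad), "reflect")
    t = t + n_pad
x = x.view(b, c, t // period, period)      # [1, 1, 34, 3]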
+class DiscriminatorS(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(DiscriminatorS, self).__init__() + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList([ + norm_f(Conv1d(1, 16, 15, 1, padding=7)), + norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), + norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), + norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), + norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), + norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), + ]) + self.dropout = nn.Dropout(0.3) + self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) + + def forward(self, x): + fmap = [] + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + +# TODO: reuse from hifigan if it is possible? +class MultiPeriodDiscriminator(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(MultiPeriodDiscriminator, self).__init__() + periods = [2,3,5,7,11] + + discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] + discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] + self.discriminators = nn.ModuleList(discs) + + def forward(self, y, y_hat): + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + y_d_rs.append(y_d_r) + y_d_gs.append(y_d_g) + fmap_rs.append(fmap_r) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +class SynthesizerTrn(nn.Module): + """ + Synthesizer for Training + """ + + def __init__(self, + n_vocab, + spec_channels, + segment_size, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + padding_idx, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + n_speakers=0, + gin_channels=0, + use_sdp=True, + **kwargs): + + super().__init__() + self.n_vocab = n_vocab + self.spec_channels = spec_channels + self.inter_channels = inter_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.padding_idx = padding_idx + self.resblock = resblock + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.upsample_rates = upsample_rates + self.upsample_initial_channel = upsample_initial_channel + self.upsample_kernel_sizes = upsample_kernel_sizes + self.segment_size = segment_size + self.n_speakers = n_speakers + self.gin_channels = gin_channels + + self.use_sdp = use_sdp + + self.enc_p = TextEncoder(n_vocab, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + padding_idx) + self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) + self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) + self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) + + if use_sdp: + self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) + else: + 
self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) + + if n_speakers > 1: + self.emb_g = nn.Embedding(n_speakers, gin_channels) + + def forward(self, x, x_lengths, y, y_lengths, sid=None): + + x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) + if self.n_speakers > 0: + g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] + else: + g = None + + z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) + z_p = self.flow(z, y_mask, g=g) + + with torch.no_grad(): + # negative cross-entropy + s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] + neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] + neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] + neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] + neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] + neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 + + attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) + attn = maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() + + w = attn.sum(2) + if self.use_sdp: + l_length = self.dp(x, x_mask, w, g=g) + l_length = l_length / torch.sum(x_mask) + else: + logw_ = torch.log(w + 1e-6) * x_mask + logw = self.dp(x, x_mask, g=g) + l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging + + # expand prior + m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] + logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] + + z_slice, ids_slice = rand_slice_segments(z, y_lengths, self.segment_size) + o = self.dec(z_slice, g=g) + + outputs = { + "model_outputs": o, + "alignments": attn.squeeze(1), + "loss_duration": l_length, + "z_mask": y_mask, + "m_p": m_p, + "logs_p": logs_p, + "z": z, + "z_p": z_p, + "m_q": m_q, + "logs_q": logs_q, + "slice_ids": ids_slice, + } + + return outputs + + def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): + x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) + if self.n_speakers > 0: + g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] + else: + g = None + + if self.use_sdp: + logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) + else: + logw = self.dp(x, x_mask, g=g) + w = torch.exp(logw) * x_mask * length_scale + w_ceil = torch.ceil(w) + y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() + y_mask = torch.unsqueeze(sequence_mask(y_lengths, None), 1).to(x_mask.dtype) + attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) + attn = generate_path(w_ceil, attn_mask) + + m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] + logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] + + z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale + z = self.flow(z_p, y_mask, g=g, reverse=True) + o = self.dec((z * y_mask)[:,:,:max_len], g=g) + return o, attn, y_mask, (z, z_p, m_p, logs_p) + + # TODO: do we really need it? Can be used for emotions conversion + def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): + assert self.n_speakers > 0, "n_speakers have to be larger than 0." 
+ g_src = self.emb_g(sid_src).unsqueeze(-1) + g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) + z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) + z_p = self.flow(z, y_mask, g=g_src) + z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) + o_hat = self.dec(z_hat * y_mask, g=g_tgt) + return o_hat, y_mask, (z, z_p, z_hat) + +################## +# Mel_processing # +################## + +mel_basis = {} +hann_window = {} + +def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): + global mel_basis + dtype_device = str(spec.dtype) + '_' + str(spec.device) + fmax_dtype_device = str(fmax) + '_' + dtype_device + if fmax_dtype_device not in mel_basis: + mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) + mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) + spec = torch.matmul(mel_basis[fmax_dtype_device], spec) + spec = spectral_normalize_torch(spec) + return spec + + +def audio_to_mel_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): + if torch.min(y) < -1.: + print('min value is ', torch.min(y)) + if torch.max(y) > 1.: + print('max value is ', torch.max(y)) + + global mel_basis, hann_window + dtype_device = str(y.dtype) + '_' + str(y.device) + fmax_dtype_device = str(fmax) + '_' + dtype_device + wnsize_dtype_device = str(win_size) + '_' + dtype_device + if fmax_dtype_device not in mel_basis: + mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) + mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) + if wnsize_dtype_device not in hann_window: + hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) + + y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') + y = y.squeeze(1) + + spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], + center=center, pad_mode='reflect', normalized=False, onesided=True) + + spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) + + spec = torch.matmul(mel_basis[fmax_dtype_device], spec) + spec = spectral_normalize_torch(spec) + + return spec + + +########### +# Commons # +########### + +def init_weights(m, mean=0.0, std=0.01): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + m.weight.data.normal_(mean, std) + + +def get_padding(kernel_size, dilation=1): + return int((kernel_size*dilation - dilation)/2) + + +def intersperse(lst, item): + result = [item] * (len(lst) * 2 + 1) + result[1::2] = lst + return result + + +def kl_divergence(m_p, logs_p, m_q, logs_q): + """KL(P||Q)""" + kl = (logs_q - logs_p) - 0.5 + kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. 
* logs_q) + return kl + + +def rand_gumbel(shape): + """Sample from the Gumbel distribution, protect from overflows.""" + uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 + return -torch.log(-torch.log(uniform_samples)) + + +def rand_gumbel_like(x): + g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) + return g + + +def slice_segments(x, ids_str, segment_size=4): + ret = torch.zeros_like(x[:, :, :segment_size]) + for i in range(x.size(0)): + idx_str = ids_str[i] + idx_end = idx_str + segment_size + ret[i] = x[i, :, idx_str:idx_end] + return ret + + +def rand_slice_segments(x, x_lengths=None, segment_size=4): + b, d, t = x.size() + if x_lengths is None: + x_lengths = t + ids_str_max = x_lengths - segment_size + 1 + ids_str_max = ids_str_max.to(device=x.device) + # torch.manual_seed(1) + # torch.cuda.manual_seed(1) + ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) + ret = slice_segments(x, ids_str, segment_size) + return ret, ids_str + + +def get_timing_signal_1d( + length, channels, min_timescale=1.0, max_timescale=1.0e4): + position = torch.arange(length, dtype=torch.float) + num_timescales = channels // 2 + log_timescale_increment = ( + math.log(float(max_timescale) / float(min_timescale)) / + (num_timescales - 1)) + inv_timescales = min_timescale * torch.exp( + torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) + scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) + signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) + signal = F.pad(signal, [0, 0, 0, channels % 2]) + signal = signal.view(1, channels, length) + return signal + + +def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): + b, channels, length = x.size() + signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) + return x + signal.to(dtype=x.dtype, device=x.device) + + +def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): + b, channels, length = x.size() + signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) + return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) + + +def subsequent_mask(length): + mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) + return mask + + +@torch.jit.script +def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): + n_channels_int = n_channels[0] + in_act = input_a + input_b + t_act = torch.tanh(in_act[:, :n_channels_int, :]) + s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) + acts = t_act * s_act + return acts + + +def convert_pad_shape(pad_shape): + l = pad_shape[::-1] + pad_shape = [item for sublist in l for item in sublist] + return pad_shape + + +def shift_1d(x): + x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] + return x + +# TODO: reuse from helpers get_mask_from_lengths? 
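slice_segments and rand_slice_segments above are how VITS trains its decoder on short random windows instead of full utterances: rand_slice_segments draws a valid start index per batch element, and slice_segments gathers the fixed-size window starting there. An equivalent restatement, not part of the patch, with hypothetical sizes:

import torch

x = torch.arange(20, dtype=torch.float).view(2, 1, 10)  # [batch, dim, time]
ids_str = torch.tensor([3, 5])                          # per-example start indices
segment_size = 4
ret = torch.stack([x[i, :, ids_str[i]:ids_str[i] + segment_size] for i in range(x.size(0))])
# ret[0] == x[0, :, 3:7] and ret[1] == x[1, :, 5:9]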
+def sequence_mask(length, max_length=None): + if max_length is None: + max_length = length.max() + x = torch.arange(max_length, dtype=length.dtype, device=length.device) + return x.unsqueeze(0) < length.unsqueeze(1) + + +def generate_path(duration, mask): + """ + duration: [b, 1, t_x] + mask: [b, 1, t_y, t_x] + """ + b, _, t_y, t_x = mask.shape + cum_duration = torch.cumsum(duration, -1) + + cum_duration_flat = cum_duration.view(b * t_x) + path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) + path = path.view(b, t_x, t_y) + path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] + path = path.unsqueeze(1).transpose(2,3) * mask + return path + + +def clip_grad_value_(parameters, clip_value, norm_type=2): + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + parameters = list(filter(lambda p: p.grad is not None, parameters)) + norm_type = float(norm_type) + if clip_value is not None: + clip_value = float(clip_value) + + total_norm = 0 + for p in parameters: + param_norm = p.grad.data.norm(norm_type) + total_norm += param_norm.item() ** norm_type + if clip_value is not None: + p.grad.data.clamp_(min=-clip_value, max=clip_value) + total_norm = total_norm ** (1. / norm_type) + return total_norm + + +############## +# Attentions # +############## +class Encoder(nn.Module): + def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): + super().__init__() + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.window_size = window_size + + self.drop = nn.Dropout(p_dropout) + self.attn_layers = nn.ModuleList() + self.norm_layers_1 = nn.ModuleList() + self.ffn_layers = nn.ModuleList() + self.norm_layers_2 = nn.ModuleList() + for i in range(self.n_layers): + self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) + self.norm_layers_1.append(LayerNorm(hidden_channels)) + self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) + self.norm_layers_2.append(LayerNorm(hidden_channels)) + + def forward(self, x, x_mask): + attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) + x = x * x_mask + for i in range(self.n_layers): + y = self.attn_layers[i](x, x, attn_mask) + y = self.drop(y) + x = self.norm_layers_1[i](x + y) + + y = self.ffn_layers[i](x, x_mask) + y = self.drop(y) + x = self.norm_layers_2[i](x + y) + x = x * x_mask + return x + + +class Decoder(nn.Module): + def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): + super().__init__() + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.proximal_bias = proximal_bias + self.proximal_init = proximal_init + + self.drop = nn.Dropout(p_dropout) + self.self_attn_layers = nn.ModuleList() + self.norm_layers_0 = nn.ModuleList() + self.encdec_attn_layers = nn.ModuleList() + self.norm_layers_1 = nn.ModuleList() + self.ffn_layers = nn.ModuleList() + self.norm_layers_2 = nn.ModuleList() + for i in range(self.n_layers): + self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, 
p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) + self.norm_layers_0.append(LayerNorm(hidden_channels)) + self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) + self.norm_layers_1.append(LayerNorm(hidden_channels)) + self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) + self.norm_layers_2.append(LayerNorm(hidden_channels)) + + def forward(self, x, x_mask, h, h_mask): + """ + x: decoder input + h: encoder output + """ + self_attn_mask = torch.tril(torch.ones(x_mask.size(2), x_mask.size(2))).unsqueeze(0).unsqueeze(0).to(device=x.device, dtype=x.dtype) + encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) + x = x * x_mask + for i in range(self.n_layers): + y = self.self_attn_layers[i](x, x, self_attn_mask) + y = self.drop(y) + x = self.norm_layers_0[i](x + y) + + y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) + y = self.drop(y) + x = self.norm_layers_1[i](x + y) + + y = self.ffn_layers[i](x, x_mask) + y = self.drop(y) + x = self.norm_layers_2[i](x + y) + x = x * x_mask + return x + + +class MultiHeadAttention(nn.Module): + def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): + super().__init__() + assert channels % n_heads == 0 + + self.channels = channels + self.out_channels = out_channels + self.n_heads = n_heads + self.p_dropout = p_dropout + self.window_size = window_size + self.heads_share = heads_share + self.block_length = block_length + self.proximal_bias = proximal_bias + self.proximal_init = proximal_init + self.attn = None + + self.k_channels = channels // n_heads + self.conv_q = nn.Conv1d(channels, channels, 1) + self.conv_k = nn.Conv1d(channels, channels, 1) + self.conv_v = nn.Conv1d(channels, channels, 1) + self.conv_o = nn.Conv1d(channels, out_channels, 1) + self.drop = nn.Dropout(p_dropout) + + if window_size is not None: + n_heads_rel = 1 if heads_share else n_heads + rel_stddev = self.k_channels**-0.5 + self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) + self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) + + nn.init.xavier_uniform_(self.conv_q.weight) + nn.init.xavier_uniform_(self.conv_k.weight) + nn.init.xavier_uniform_(self.conv_v.weight) + if proximal_init: + with torch.no_grad(): + self.conv_k.weight.copy_(self.conv_q.weight) + self.conv_k.bias.copy_(self.conv_q.bias) + + def forward(self, x, c, attn_mask=None): + q = self.conv_q(x) + k = self.conv_k(c) + v = self.conv_v(c) + + x, self.attn = self.attention(q, k, v, mask=attn_mask) + + x = self.conv_o(x) + return x + + def attention(self, query, key, value, mask=None): + # reshape [b, d, t] -> [b, n_h, t, d_k] + b, d, t_s, t_t = key.size(0), key.size(1), key.size(2), query.size(2) + query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) + key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) + value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) + + scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) + if self.window_size is not None: + assert t_s == t_t, "Relative attention is only available for self-attention." 
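+            # Windowed relative-position attention: learned embeddings for
+            # offsets in [-window_size, window_size] are matched against the
+            # queries and folded into the absolute-position score matrix.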
+ key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) + rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) + scores_local = self._relative_position_to_absolute_position(rel_logits) + scores = scores + scores_local + if self.proximal_bias: + assert t_s == t_t, "Proximal bias is only available for self-attention." + scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) + if mask is not None: + scores = scores.masked_fill(mask == 0, -1e4) + if self.block_length is not None: + assert t_s == t_t, "Local attention is only available for self-attention." + block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) + scores = scores.masked_fill(block_mask == 0, -1e4) + p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] + p_attn = self.drop(p_attn) + output = torch.matmul(p_attn, value) + if self.window_size is not None: + relative_weights = self._absolute_position_to_relative_position(p_attn) + value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) + output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) + output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] + return output, p_attn + + def _matmul_with_relative_values(self, x, y): + """ + x: [b, h, l, m] + y: [h or 1, m, d] + ret: [b, h, l, d] + """ + ret = torch.matmul(x, y.unsqueeze(0)) + return ret + + def _matmul_with_relative_keys(self, x, y): + """ + x: [b, h, l, d] + y: [h or 1, m, d] + ret: [b, h, l, m] + """ + ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) + return ret + + def _get_relative_embeddings(self, relative_embeddings, length): + # Pad first before slice to avoid using cond ops. + pad_length = max(length - (self.window_size + 1), 0) + slice_start_position = max((self.window_size + 1) - length, 0) + slice_end_position = slice_start_position + 2 * length - 1 + if pad_length > 0: + padded_relative_embeddings = F.pad( + relative_embeddings, + convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) + else: + padded_relative_embeddings = relative_embeddings + used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] + return used_relative_embeddings + + def _relative_position_to_absolute_position(self, x): + """ + x: [b, h, l, 2*l-1] + ret: [b, h, l, l] + """ + batch, heads, length, _ = x.size() + # Concat columns of pad to shift from relative to absolute indexing. + x = F.pad(x, convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) + + # Concat extra elements so to add up to shape (len+1, 2*len-1). + x_flat = x.view([batch, heads, length * 2 * length]) + x_flat = F.pad(x_flat, convert_pad_shape([[0,0],[0,0],[0,length-1]])) + + # Reshape and slice out the padded elements. 
+ x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] + return x_final + + def _absolute_position_to_relative_position(self, x): + """ + x: [b, h, l, l] + ret: [b, h, l, 2*l-1] + """ + batch, heads, length, _ = x.size() + # padd along column + x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) + x_flat = x.view([batch, heads, length**2 + length*(length -1)]) + # add 0's in the beginning that will skew the elements after reshape + x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]])) + x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] + return x_final + + def _attention_bias_proximal(self, length): + """Bias for self-attention to encourage attention to close positions. + Args: + length: an integer scalar. + Returns: + a Tensor with shape [1, 1, length, length] + """ + r = torch.arange(length, dtype=torch.float32) + diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) + return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) + + +class FFN(nn.Module): + def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.activation = activation + self.causal = causal + + if causal: + self.padding = self._causal_padding + else: + self.padding = self._same_padding + + self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) + self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) + self.drop = nn.Dropout(p_dropout) + + def forward(self, x, x_mask): + x = self.conv_1(self.padding(x * x_mask)) + if self.activation == "gelu": + x = x * torch.sigmoid(1.702 * x) + else: + x = torch.relu(x) + x = self.drop(x) + x = self.conv_2(self.padding(x * x_mask)) + return x * x_mask + + def _causal_padding(self, x): + if self.kernel_size == 1: + return x + pad_l = self.kernel_size - 1 + pad_r = 0 + padding = [[0, 0], [0, 0], [pad_l, pad_r]] + x = F.pad(x, convert_pad_shape(padding)) + return x + + def _same_padding(self, x): + if self.kernel_size == 1: + return x + pad_l = (self.kernel_size - 1) // 2 + pad_r = self.kernel_size // 2 + padding = [[0, 0], [0, 0], [pad_l, pad_r]] + x = F.pad(x, convert_pad_shape(padding)) + return x + + +############## +# Transforms # +############## + +DEFAULT_MIN_BIN_WIDTH = 1e-3 +DEFAULT_MIN_BIN_HEIGHT = 1e-3 +DEFAULT_MIN_DERIVATIVE = 1e-3 + + +def piecewise_rational_quadratic_transform(inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + tails=None, + tail_bound=1., + min_bin_width=DEFAULT_MIN_BIN_WIDTH, + min_bin_height=DEFAULT_MIN_BIN_HEIGHT, + min_derivative=DEFAULT_MIN_DERIVATIVE): + + if tails is None: + spline_fn = rational_quadratic_spline + spline_kwargs = {} + else: + spline_fn = unconstrained_rational_quadratic_spline + spline_kwargs = { + 'tails': tails, + 'tail_bound': tail_bound + } + + outputs, logabsdet = spline_fn( + inputs=inputs, + unnormalized_widths=unnormalized_widths, + unnormalized_heights=unnormalized_heights, + unnormalized_derivatives=unnormalized_derivatives, + inverse=inverse, + min_bin_width=min_bin_width, + min_bin_height=min_bin_height, + min_derivative=min_derivative, + **spline_kwargs + ) + return outputs, logabsdet + + +def searchsorted(bin_locations, inputs, eps=1e-6): + bin_locations[..., -1] += eps + return 
torch.sum( + inputs[..., None] >= bin_locations, + dim=-1 + ) - 1 + + +def unconstrained_rational_quadratic_spline(inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + tails='linear', + tail_bound=1., + min_bin_width=DEFAULT_MIN_BIN_WIDTH, + min_bin_height=DEFAULT_MIN_BIN_HEIGHT, + min_derivative=DEFAULT_MIN_DERIVATIVE): + inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) + outside_interval_mask = ~inside_interval_mask + + outputs = torch.zeros_like(inputs) + logabsdet = torch.zeros_like(inputs) + + if tails == 'linear': + unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) + constant = np.log(np.exp(1 - min_derivative) - 1) + unnormalized_derivatives[..., 0] = constant + unnormalized_derivatives[..., -1] = constant + + outputs[outside_interval_mask] = inputs[outside_interval_mask] + logabsdet[outside_interval_mask] = 0 + else: + raise RuntimeError('{} tails are not implemented.'.format(tails)) + + outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( + inputs=inputs[inside_interval_mask], + unnormalized_widths=unnormalized_widths[inside_interval_mask, :], + unnormalized_heights=unnormalized_heights[inside_interval_mask, :], + unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], + inverse=inverse, + left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, + min_bin_width=min_bin_width, + min_bin_height=min_bin_height, + min_derivative=min_derivative + ) + + return outputs, logabsdet + +def rational_quadratic_spline(inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + left=0., right=1., bottom=0., top=1., + min_bin_width=DEFAULT_MIN_BIN_WIDTH, + min_bin_height=DEFAULT_MIN_BIN_HEIGHT, + min_derivative=DEFAULT_MIN_DERIVATIVE): + if torch.min(inputs) < left or torch.max(inputs) > right: + raise ValueError('Input to a transform is not within its domain') + + num_bins = unnormalized_widths.shape[-1] + + if min_bin_width * num_bins > 1.0: + raise ValueError('Minimal bin width too large for the number of bins') + if min_bin_height * num_bins > 1.0: + raise ValueError('Minimal bin height too large for the number of bins') + + widths = F.softmax(unnormalized_widths, dim=-1) + widths = min_bin_width + (1 - min_bin_width * num_bins) * widths + cumwidths = torch.cumsum(widths, dim=-1) + cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) + cumwidths = (right - left) * cumwidths + left + cumwidths[..., 0] = left + cumwidths[..., -1] = right + widths = cumwidths[..., 1:] - cumwidths[..., :-1] + + derivatives = min_derivative + F.softplus(unnormalized_derivatives) + + heights = F.softmax(unnormalized_heights, dim=-1) + heights = min_bin_height + (1 - min_bin_height * num_bins) * heights + cumheights = torch.cumsum(heights, dim=-1) + cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) + cumheights = (top - bottom) * cumheights + bottom + cumheights[..., 0] = bottom + cumheights[..., -1] = top + heights = cumheights[..., 1:] - cumheights[..., :-1] + + if inverse: + bin_idx = searchsorted(cumheights, inputs)[..., None] + else: + bin_idx = searchsorted(cumwidths, inputs)[..., None] + + input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] + input_bin_widths = widths.gather(-1, bin_idx)[..., 0] + + input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] + delta = heights / widths + input_delta = delta.gather(-1, bin_idx)[..., 0] + + input_derivatives = 
derivatives.gather(-1, bin_idx)[..., 0] + input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] + + input_heights = heights.gather(-1, bin_idx)[..., 0] + + if inverse: + a = (((inputs - input_cumheights) * (input_derivatives + + input_derivatives_plus_one + - 2 * input_delta) + + input_heights * (input_delta - input_derivatives))) + b = (input_heights * input_derivatives + - (inputs - input_cumheights) * (input_derivatives + + input_derivatives_plus_one + - 2 * input_delta)) + c = - input_delta * (inputs - input_cumheights) + + discriminant = b.pow(2) - 4 * a * c + assert (discriminant >= 0).all() + + root = (2 * c) / (-b - torch.sqrt(discriminant)) + outputs = root * input_bin_widths + input_cumwidths + + theta_one_minus_theta = root * (1 - root) + denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) + * theta_one_minus_theta) + derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) + + 2 * input_delta * theta_one_minus_theta + + input_derivatives * (1 - root).pow(2)) + logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) + + return outputs, -logabsdet + else: + theta = (inputs - input_cumwidths) / input_bin_widths + theta_one_minus_theta = theta * (1 - theta) + + numerator = input_heights * (input_delta * theta.pow(2) + + input_derivatives * theta_one_minus_theta) + denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) + * theta_one_minus_theta) + outputs = input_cumheights + numerator / denominator + + derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) + + 2 * input_delta * theta_one_minus_theta + + input_derivatives * (1 - theta).pow(2)) + logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) + + return outputs, logabsdet From 8769c576924f9323532b7b2ae3b2b1933bb5dc51 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Sat, 6 Aug 2022 06:52:17 -0700 Subject: [PATCH 129/244] exps --- nemo/collections/tts/losses/vits_losses.py | 7 +- nemo/collections/tts/models/vits.py | 6 +- nemo/collections/tts/models/vits_coqui.py | 196 ++++++++++--------- nemo/collections/tts/modules/vits_modules.py | 1 + 4 files changed, 110 insertions(+), 100 deletions(-) diff --git a/nemo/collections/tts/losses/vits_losses.py b/nemo/collections/tts/losses/vits_losses.py index a77c2a03ebed..ded9db3a0a86 100644 --- a/nemo/collections/tts/losses/vits_losses.py +++ b/nemo/collections/tts/losses/vits_losses.py @@ -136,15 +136,16 @@ def forward(self, disc_real_outputs, disc_generated_outputs): r_losses = [] g_losses = [] loss = 0 - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): + for i, (dr, dg) in enumerate(zip(disc_real_outputs, disc_generated_outputs)): dr = dr.float() dg = dg.float() r_loss = torch.mean((1 - dr) ** 2) g_loss = torch.mean(dg ** 2) # real_loss += r_loss # gen_loss += g_loss - # loss += torch.max(r_losпшеs, g_loss) - loss += r_loss + g_loss + if i == 0: + loss += torch.max(r_loss, g_loss) * 0.5 + # loss += r_loss + g_loss r_losses.append(r_loss.item()) g_losses.append(g_loss.item()) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 18b1dde0e587..ed3a595000d0 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -246,9 +246,9 @@ def training_step(self, batch, batch_idx): y = torch.unsqueeze(y, 1) y = slice_segments(y, ids_slice * self.cfg.n_window_stride, self._cfg.segment_size) # with 
autocast(enabled=True): - print(y.requires_grad, y_hat.detach().requires_grad) + y_d_hat_r, y_d_hat_g, _, _ = self.net_d(y, y_hat.detach()) - print(y_d_hat_r[0].requires_grad) + # with autocast(enabled=False): # loss_disc_real, loss_disc_gen, losses_disc_r, losses_disc_g = self.disc_loss(disc_real_outputs=y_d_hat_r, loss_disc, losses_disc_r, losses_disc_g = self.disc_loss(disc_real_outputs=y_d_hat_r, @@ -257,7 +257,7 @@ def training_step(self, batch, batch_idx): loss_disc_all = loss_disc # if self.global_step <= 180000: # train discriminator - print(loss_disc_all.requires_grad) + optim_d.zero_grad() self.manual_backward(loss_disc_all) norm_d = clip_grad_value_(self.net_d.parameters(), None) diff --git a/nemo/collections/tts/models/vits_coqui.py b/nemo/collections/tts/models/vits_coqui.py index 18e5921990e1..ea41db34e9af 100644 --- a/nemo/collections/tts/models/vits_coqui.py +++ b/nemo/collections/tts/models/vits_coqui.py @@ -41,13 +41,17 @@ from nemo.collections.tts.models.base import TextToWaveform from nemo.collections.tts.modules.vits_coqui_modules import ( - MultiPeriodDiscriminator, SynthesizerTrn, audio_to_mel_torch, clip_grad_value_, slice_segments, spec_to_mel_torch, ) + +from nemo.collections.tts.modules.vits_modules import ( + MultiPeriodDiscriminator, +) + from nemo.core.classes.common import PretrainedModelInfo from nemo.core.optim.lr_scheduler import CosineAnnealing from nemo.utils import logging, model_utils @@ -115,7 +119,7 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): upsample_kernel_sizes=cfg.generator.upsample_kernel_sizes, ) self.net_d = MultiPeriodDiscriminator(cfg.use_spectral_norm) - self.automatic_optimization = True + self.automatic_optimization = False window_fn = { 'hann': torch.hann_window, @@ -226,7 +230,7 @@ def _freeze_layers(self): for param in self.waveform_decoder.parameters(): param.requires_grad = False - def training_step(self, batch, batch_idx, optimizer_idx: int): + def training_step(self, batch, batch_idx): """Perform a single training step. Run the model forward pass and compute losses. Args: batch (Dict): Input tensors. @@ -235,6 +239,8 @@ def training_step(self, batch, batch_idx, optimizer_idx: int): Returns: Tuple[Dict, Dict]: Model ouputs and computed losses. 
""" + optim_g, optim_d = self.optimizers() + (waveform, y_lengths, tokens, token_lenghts) = batch spec = self.get_spec(waveform) @@ -243,108 +249,110 @@ def training_step(self, batch, batch_idx, optimizer_idx: int): # self._freeze_layers() # Discriminator - if optimizer_idx == 0: - # generator pass - outputs = self.net_g( - tokens, - token_lenghts, - spec, - spec_lens, - ) + # generator pass + outputs = self.net_g( + tokens, + token_lenghts, + spec, + spec_lens, + ) - # y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = outputs - # cache tensors for the generator pass - self.model_outputs_cache = outputs # pylint: disable=attribute-defined-outside-init + # y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = outputs - y = torch.unsqueeze(waveform, 1) - y = slice_segments(y, self.model_outputs_cache["slice_ids"] * self.cfg.n_window_stride, self._cfg.segment_size) - # compute scores and features - - print(y.requires_grad, outputs["model_outputs"].detach().requires_grad) - y_d_hat_r, y_d_hat_g, _, _ = self.net_d( - y, outputs["model_outputs"].detach() - ) - print(y_d_hat_r[0].requires_grad) - # compute loss - with autocast(enabled=False): # use float32 for the criterion - loss_disc, losses_disc_r, losses_disc_g = self.disc_loss(disc_real_outputs=y_d_hat_r, - disc_generated_outputs=y_d_hat_g) - loss_disc_all = loss_disc - loss_dict = { - "loss": loss_disc_all, - } + y = torch.unsqueeze(waveform, 1) + y = slice_segments(y, outputs["slice_ids"] * self.cfg.n_window_stride, self._cfg.segment_size) + # compute scores and features + + y_d_hat_r, y_d_hat_g, _, _ = self.net_d( + y, outputs["model_outputs"].detach() + ) - for i, v in enumerate(losses_disc_r): - loss_dict[f"loss_disc_r_{i}"] = v + optim_d.zero_grad() + # compute loss + with autocast(enabled=False): # use float32 for the criterion + loss_disc, losses_disc_r, losses_disc_g = self.disc_loss(disc_real_outputs=y_d_hat_r, + disc_generated_outputs=y_d_hat_g) + loss_disc_all = loss_disc - for i, v in enumerate(losses_disc_g): - loss_dict[f"loss_disc_g_{i}"] = v - - self.log_dict(loss_dict, on_step=True, sync_dist=True) - print(loss_disc_all.requires_grad) - return loss_disc_all + self.manual_backward(loss_disc_all) + optim_d.step() + + loss_dict = { + "loss_disc_all": loss_disc_all, + } + + for i, v in enumerate(losses_disc_r): + loss_dict[f"loss_disc_r_{i}"] = v + + for i, v in enumerate(losses_disc_g): + loss_dict[f"loss_disc_g_{i}"] = v # Generator - if optimizer_idx == 1: - mel = spec_to_mel_torch( - spec, + + + + mel = spec_to_mel_torch( + spec, + self._cfg.n_window_size, + self._cfg.n_mel_channels, + self._cfg.sample_rate, + self._cfg.mel_fmin, + self._cfg.mel_fmax, + ) + # y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = self.model_outputs_cache + # compute melspec segment + with autocast(enabled=False): + mel_slice = slice_segments(mel, outputs["slice_ids"], self._cfg.segment_size // self.cfg.n_window_stride) + mel_slice_hat = audio_to_mel_torch( + outputs["model_outputs"].float().squeeze(1), self._cfg.n_window_size, self._cfg.n_mel_channels, self._cfg.sample_rate, + self.cfg.n_window_stride, + self._cfg.preprocessor.n_window_size, self._cfg.mel_fmin, self._cfg.mel_fmax, ) - # y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = self.model_outputs_cache - # compute melspec segment - with autocast(enabled=False): - mel_slice = slice_segments(mel, self.model_outputs_cache["slice_ids"], 
self._cfg.segment_size // self.cfg.n_window_stride) - mel_slice_hat = audio_to_mel_torch( - self.model_outputs_cache["model_outputs"].float().squeeze(1), - self._cfg.n_window_size, - self._cfg.n_mel_channels, - self._cfg.sample_rate, - self.cfg.n_window_stride, - self._cfg.preprocessor.n_window_size, - self._cfg.mel_fmin, - self._cfg.mel_fmax, - ) - y = torch.unsqueeze(waveform, 1) - y = slice_segments(y, self.model_outputs_cache["slice_ids"] * self.cfg.n_window_stride, self._cfg.segment_size) - # compute discriminator scores and features - y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = self.disc( - y, self.model_outputs_cache["model_outputs"] - ) + y = torch.unsqueeze(waveform, 1) + y = slice_segments(y, outputs["slice_ids"] * self.cfg.n_window_stride, self._cfg.segment_size) + # compute discriminator scores and features + + y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = self.net_d( + y, outputs["model_outputs"] + ) + + optim_g.zero_grad() + # compute losses + with autocast(enabled=False): # use float32 for the criterion + loss_dur = torch.sum(outputs["loss_duration"].float()) + loss_mel = F.l1_loss(mel_slice, mel_slice_hat) * self._cfg.c_mel + loss_kl = self.kl_loss(z_p=outputs["z_p"], + logs_q=outputs["logs_q"], + m_p=outputs["m_p"], + logs_p=outputs["logs_p"], + z_mask=outputs["z_mask"]) * self._cfg.c_kl + loss_fm = self.feat_matching_loss(fmap_r=fmap_r, fmap_g=fmap_g) + loss_gen, losses_gen = self.gen_loss(disc_outputs=y_d_hat_g) + loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl + + + self.manual_backward(loss_gen_all) + optim_g.step() + + loss_dict.update({ + "loss_gen_all": loss_gen_all, + "loss_gen": loss_gen, + "loss_fm": loss_fm, + "loss_mel * c_mel": loss_mel, + "loss_dur": loss_dur, + "loss_kl * c_kl": loss_kl, + } + ) + + for i, v in enumerate(losses_gen): + loss_dict[f"loss_gen_i_{i}"] = v - # compute losses - with autocast(enabled=False): # use float32 for the criterion - loss_dur = torch.sum(self.model_outputs_cache["loss_duration"].float()) - loss_mel = F.l1_loss(mel_slice, mel_slice_hat) * self._cfg.c_mel - loss_kl = self.kl_loss(z_p=self.model_outputs_cache["z_p"], - logs_q=self.model_outputs_cache["logs_q"], - m_p=self.model_outputs_cache["m_p"], - logs_p=self.model_outputs_cache["logs_p"], - z_mask=self.model_outputs_cache["z_mask"]) * self._cfg.c_kl - loss_fm = self.feat_matching_loss(fmap_r=fmap_r, fmap_g=fmap_g) - loss_gen, losses_gen = self.gen_loss(disc_outputs=y_d_hat_g) - loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl - - loss_dict = { - "loss": loss_gen_all, - "loss_gen": loss_gen, - "loss_fm": loss_fm, - "loss_mel * c_mel": loss_mel, - "loss_dur": loss_dur, - "loss_kl * c_kl": loss_kl, - } - - for i, v in enumerate(losses_gen): - loss_dict[f"loss_gen_i_{i}"] = v - - self.log_dict(loss_dict, on_step=True, sync_dist=True) - - return loss_gen_all - - raise ValueError(" [!] 
Unexpected `optimizer_idx`.") + self.log_dict(loss_dict, on_step=True, sync_dist=True) def _log(self, batch, outputs, name_prefix="train"): # pylint: disable=unused-argument,no-self-use y_hat, l_length, attn, ids_slice, x_mask, z_mask, _ = outputs @@ -399,8 +407,8 @@ def _log(self, batch, outputs, name_prefix="train"): # pylint: disable=unused-a # """ # self._log(batch, outputs, "train") - def eval_step(self, batch: dict, criterion, optimizer_idx: int): - return self.train_step(batch, criterion, optimizer_idx) + def eval_step(self, batch: dict, criterion): + return self.train_step(batch, criterion) def validation_step(self, batch, batch_idx): (y, y_lengths, x, x_lengths) = batch diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index dbf18d2f7dd5..9bba7200dd8a 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -752,6 +752,7 @@ def forward(self, x): for l in self.convs: x = l(x) + x = self.dropout(x) x = F.leaky_relu(x, LRELU_SLOPE) fmap.append(x) x = self.conv_post(x) From d49aeed92785f86a608582678c53951ab94b457b Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Mon, 8 Aug 2022 06:16:42 -0700 Subject: [PATCH 130/244] bugfix --- nemo/collections/tts/losses/vits_losses.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nemo/collections/tts/losses/vits_losses.py b/nemo/collections/tts/losses/vits_losses.py index ded9db3a0a86..66abdc48b34c 100644 --- a/nemo/collections/tts/losses/vits_losses.py +++ b/nemo/collections/tts/losses/vits_losses.py @@ -145,6 +145,8 @@ def forward(self, disc_real_outputs, disc_generated_outputs): # gen_loss += g_loss if i == 0: loss += torch.max(r_loss, g_loss) * 0.5 + else: + loss += torch.max(r_loss, g_loss) # loss += r_loss + g_loss r_losses.append(r_loss.item()) g_losses.append(g_loss.item()) From a68e96527cf13be9557732275b39e25a404df199 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Mon, 22 Aug 2022 03:30:20 -0700 Subject: [PATCH 131/244] added side file --- examples/tts/vits.py | 4 +- nemo/collections/tts/models/vits_test.py | 432 +++++++++++++++++++++++ 2 files changed, 433 insertions(+), 3 deletions(-) create mode 100644 nemo/collections/tts/models/vits_test.py diff --git a/examples/tts/vits.py b/examples/tts/vits.py index e83da88b0184..e5c8b56a17c3 100644 --- a/examples/tts/vits.py +++ b/examples/tts/vits.py @@ -13,11 +13,9 @@ # limitations under the License. import pytorch_lightning as pl -from pytorch_lightning.plugins.precision.native_amp import NativeMixedPrecisionPlugin -from torch.cuda.amp import GradScaler from nemo.collections.common.callbacks import LogEpochTimeCallback -from nemo.collections.tts.models.vits import VitsModel +from nemo.collections.tts.models.vits_test import VitsModel from nemo.core.config import hydra_runner from nemo.utils.exp_manager import exp_manager diff --git a/nemo/collections/tts/models/vits_test.py b/nemo/collections/tts/models/vits_test.py new file mode 100644 index 000000000000..707ac8b3ee17 --- /dev/null +++ b/nemo/collections/tts/models/vits_test.py @@ -0,0 +1,432 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from nemo.core import typecheck + +# typecheck.set_typecheck_enabled(False) + +import omegaconf +import torch +import wandb +from hydra.utils import instantiate +from omegaconf import DictConfig +from pytorch_lightning import Trainer +from pytorch_lightning.loggers import WandbLogger +from torch.cuda.amp import autocast, GradScaler +from torch.nn import functional as F + +from nemo.collections.tts.helpers.helpers import plot_spectrogram_to_numpy, DistributedBucketSampler +from nemo.collections.tts.losses.vits_losses import ( + KlLoss, + FeatureMatchingLoss, + DiscriminatorLoss, + GeneratorLoss +) +from nemo.collections.tts.models.base import TextToWaveform +from nemo.collections.tts.modules.vits_modules import ( + MultiPeriodDiscriminator, + SynthesizerTrn, + audio_to_mel_torch, + clip_grad_value_, + slice_segments, + spec_to_mel_torch, +) +from nemo.core.classes.common import PretrainedModelInfo +from nemo.core.optim.lr_scheduler import CosineAnnealing +from nemo.utils import logging, model_utils + +class VitsModel(TextToWaveform): + def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): + # Convert to Hydra 1.0 compatible DictConfig + + cfg = model_utils.convert_model_config_to_dict_config(cfg) + cfg = model_utils.maybe_update_config_version(cfg) + + # setup normalizer + self.normalizer = None + self.text_normalizer_call = None + self.text_normalizer_call_kwargs = {} + self._setup_normalizer(cfg) + + # setup tokenizer + self.tokenizer = None + self._setup_tokenizer(cfg) + assert self.tokenizer is not None + + num_tokens = len(self.tokenizer.tokens) + self.tokenizer_pad = self.tokenizer.pad + self.tokenizer_unk = self.tokenizer.oov + + # self.scaler = GradScaler() + + super().__init__(cfg=cfg, trainer=trainer) + + self.audio_to_melspec_precessor = instantiate(cfg.preprocessor, highfreq=cfg.train_ds.dataset.highfreq) + + self.feat_matching_loss = FeatureMatchingLoss() + self.disc_loss = DiscriminatorLoss() + self.gen_loss = GeneratorLoss() + self.kl_loss = KlLoss() + + self.log_train_images = False + self.logged_real_samples = False + self._tb_logger = None + self.hann_window = None + self.sample_rate = cfg.sample_rate + self.hop_size = cfg.n_window_stride + self.n_fft = cfg.train_ds.dataset.n_fft + self.win_length = cfg.train_ds.dataset.win_length + + # TODO: need to add SynthesizerTrn in config + self.net_g = SynthesizerTrn( + n_vocab=num_tokens, + spec_channels=cfg.train_ds.dataset.n_fft // 2 + 1, + segment_size=cfg.segment_size // cfg.train_ds.dataset.hop_length, + inter_channels=cfg.inter_channels, + hidden_channels=cfg.hidden_channels, + filter_channels=cfg.filter_channels, + n_heads=cfg.n_heads, + n_layers=cfg.n_layers, + kernel_size=cfg.pitch_embedding_kernel_size, + p_dropout=cfg.p_dropout, + padding_idx=self.tokenizer_pad, + resblock=cfg.generator.resblock, + resblock_kernel_sizes=cfg.generator.resblock_kernel_sizes, + resblock_dilation_sizes=cfg.generator.resblock_dilation_sizes, + upsample_rates=cfg.generator.upsample_rates, + upsample_initial_channel=cfg.generator.upsample_initial_channel, + upsample_kernel_sizes=cfg.generator.upsample_kernel_sizes, + ) + 
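+        # Adversarial counterpart to the generator: a bank of period-based
+        # sub-discriminators that score real vs. generated waveform slices.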
self.net_d = MultiPeriodDiscriminator(cfg.use_spectral_norm) + self.automatic_optimization = True + + window_fn = { + 'hann': torch.hann_window, + 'hamming': torch.hamming_window, + 'blackman': torch.blackman_window, + 'bartlett': torch.bartlett_window, + 'none': None, + }.get(self.hann_window, None) + + self.stft = lambda x: torch.stft( + input=x, + n_fft=self.n_fft, + hop_length=self.hop_size, + win_length=self.win_length, + window=window_fn(self.win_length, periodic=False).to(torch.float) if window_fn else None, + ) + + def _setup_normalizer(self, cfg): + if "text_normalizer" in cfg: + normalizer_kwargs = {} + + if "whitelist" in cfg.text_normalizer: + normalizer_kwargs["whitelist"] = self.register_artifact( + 'text_normalizer.whitelist', cfg.text_normalizer.whitelist + ) + + self.normalizer = instantiate(cfg.text_normalizer, **normalizer_kwargs) + self.text_normalizer_call = self.normalizer.normalize + if "text_normalizer_call_kwargs" in cfg: + self.text_normalizer_call_kwargs = cfg.text_normalizer_call_kwargs + + def _setup_tokenizer(self, cfg): + text_tokenizer_kwargs = {} + if "g2p" in cfg.text_tokenizer and cfg.text_tokenizer.g2p is not None: + g2p_kwargs = {} + + if "phoneme_dict" in cfg.text_tokenizer.g2p: + g2p_kwargs["phoneme_dict"] = self.register_artifact( + 'text_tokenizer.g2p.phoneme_dict', cfg.text_tokenizer.g2p.phoneme_dict, + ) + + if "heteronyms" in cfg.text_tokenizer.g2p: + g2p_kwargs["heteronyms"] = self.register_artifact( + 'text_tokenizer.g2p.heteronyms', cfg.text_tokenizer.g2p.heteronyms, + ) + + text_tokenizer_kwargs["g2p"] = instantiate(cfg.text_tokenizer.g2p, **g2p_kwargs) + + self.tokenizer = instantiate(cfg.text_tokenizer, **text_tokenizer_kwargs) + + def parse(self, str_input: str) -> torch.tensor: + # TODO: Implement + pass + + def configure_optimizers(self): + optim_g = torch.optim.AdamW(self.net_g.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps, weight_decay=0.01) + optim_d = torch.optim.AdamW(self.net_d.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps, weight_decay=0.01) + + max_steps=800000 + min_lr = 1e-5 + wu_ratio = 0.02 + wu_steps = 16000 + + scheduler_g = CosineAnnealing(optimizer=optim_g, max_steps=max_steps, min_lr=min_lr, warmup_steps=wu_steps,) + scheduler_d = CosineAnnealing(optimizer=optim_d, max_steps=max_steps, min_lr=min_lr)#, warmup_steps=1000,) + + # scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=self._cfg.lr_decay) + # scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=self._cfg.lr_decay) + scheduler_g_dict = {'scheduler': scheduler_g, 'interval': 'step'} + scheduler_d_dict = {'scheduler': scheduler_d, 'interval': 'step'} + + return [optim_d, optim_g], [scheduler_d_dict, scheduler_g_dict] + + # only for inference + def forward(self, batch, batch_idx, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): + with torch.no_grad(): + (y, y_lengths, x, x_lengths) = batch + # remove else + x = x[:1] + x_lengths = x_lengths[:1] + + y_hat, attn, mask, (z, z_p, m_p, logs_p) = self.net_g.infer(x, x_lengths, sid=sid, noise_scale=noise_scale, + length_scale=length_scale, noise_scale_w=noise_scale_w, max_len=1000) + y_hat_lengths = mask.sum([1, 2]).long() * self._cfg.n_window_stride + return y_hat, y_hat_lengths, (z, z_p, m_p, logs_p) + + def get_spec(self, audio): + with torch.cuda.amp.autocast(enabled=False): + spec = self.stft(audio) + if spec.dtype in [torch.cfloat, torch.cdouble]: + spec = torch.view_as_real(spec) + spec = 
torch.sqrt(spec.pow(2).sum(-1) + 1e-9) + return spec + + def training_step(self, batch, batch_idx, optimizer_idx): + # get optimizers + # optim_g, optim_d = self.optimizers() + + (y, y_lengths, x, x_lengths) = batch + + spec = self.get_spec(y) + spec_lengths = self.audio_to_melspec_precessor.get_seq_len(y_lengths) + + # train discriminator + if optimizer_idx == 0: + # with autocast(enabled=True): + # with torch.no_grad(): + y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = self.net_g( + x, x_lengths, spec, spec_lengths + ) + print(y_hat.requires_grad) + self.stash = y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) + y = torch.unsqueeze(y, 1) + y = slice_segments(y, ids_slice * self.cfg.n_window_stride, self._cfg.segment_size) + # with autocast(enabled=True): + + y_d_hat_r, y_d_hat_g, _, _ = self.net_d(y, y_hat.detach()) + # with autocast(enabled=False): + # loss_disc_real, loss_disc_gen, losses_disc_r, losses_disc_g = self.disc_loss(disc_real_outputs=y_d_hat_r, + loss_disc, losses_disc_r, losses_disc_g = self.disc_loss(disc_real_outputs=y_d_hat_r, + disc_generated_outputs=y_d_hat_g) + # loss_disc_all = torch.max(loss_disc_real, loss_disc_gen) + loss_disc_all = loss_disc + # if self.global_step <= 180000: + # train discriminator + # optim_d.zero_grad() + # self.manual_backward(loss_disc_all) + norm_d = clip_grad_value_(self.net_d.parameters(), None) + # optim_d.step() + + metrics = { + "loss_disc_all": loss_disc_all, + "grad_disc": norm_d, + } + + for i, v in enumerate(losses_disc_r): + metrics[f"loss_disc_r_{i}"] = v + + for i, v in enumerate(losses_disc_g): + metrics[f"loss_disc_g_{i}"] = v + + self.log_dict(metrics, on_step=True, sync_dist=True) + print('disc', loss_disc_all) + return loss_disc_all + + # train generator + if optimizer_idx == 1: + # with autocast(enabled=True): + mel = spec_to_mel_torch( + spec, + self._cfg.n_window_size, + self._cfg.n_mel_channels, + self._cfg.sample_rate, + self._cfg.mel_fmin, + self._cfg.mel_fmax, + ) + + y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = self.stash + print(y_hat.requires_grad) + y = torch.unsqueeze(y, 1) + y = slice_segments(y, ids_slice * self.cfg.n_window_stride, self._cfg.segment_size) + y_mel = slice_segments(mel, ids_slice, self._cfg.segment_size // self.cfg.n_window_stride) + + y_hat = y_hat.float() + y_hat_mel = audio_to_mel_torch( + y_hat.squeeze(1), + self._cfg.n_window_size, + self._cfg.n_mel_channels, + self._cfg.sample_rate, + self.cfg.n_window_stride, + self._cfg.preprocessor.n_window_size, + self._cfg.mel_fmin, + self._cfg.mel_fmax, + ) + + # Generator + y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = self.net_d(y, y_hat) + # with autocast(enabled=False): + loss_dur = torch.sum(l_length.float()) + loss_mel = F.l1_loss(y_mel, y_hat_mel) * self._cfg.c_mel + loss_kl = self.kl_loss(z_p=z_p, logs_q=logs_q, m_p=m_p, logs_p=logs_p, z_mask=z_mask) * self._cfg.c_kl + loss_fm = self.feat_matching_loss(fmap_r=fmap_r, fmap_g=fmap_g) + loss_gen, losses_gen = self.gen_loss(disc_outputs=y_d_hat_g) + loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl + + # if loss_gen > loss_disc: + # loss_gen_all = loss_fm + loss_mel + loss_dur + loss_kl + loss_gen + # else: + # loss_gen_all = loss_fm + loss_mel + loss_dur + loss_kl + + # train generator + # optim_g.zero_grad() + # self.manual_backward(loss_gen_all) + norm_g = clip_grad_value_(self.net_g.parameters(), None) + # optim_g.step() + + metrics = { + "loss_gen": loss_gen, + 
"loss_fm": loss_fm, + "loss_mel * c_mel": loss_mel, + "loss_dur": loss_dur, + "loss_kl * c_kl": loss_kl, + "loss_gen_all": loss_gen_all, + "grad_gen": norm_g, + } + + for i, v in enumerate(losses_gen): + metrics[f"loss_gen_i_{i}"] = v + + self.log_dict(metrics, on_step=True, sync_dist=True) + print('gen', loss_gen_all) + return loss_gen_all + + # schedulers = self.lr_schedulers() + # if schedulers is not None:# and self.trainer.is_last_batch: + # sch1, sch2 = schedulers + # sch1.step() + # sch2.step() + + + + def validation_step(self, batch, batch_idx): + (y, y_lengths, x, x_lengths) = batch + + # TODO: fix hardcode + y_hat, attn, mask, *_ = self.net_g.infer(x, x_lengths, max_len=1000) + y_hat = y_hat.squeeze() + y_hat_lengths = mask.sum([1, 2]).long() * self._cfg.train_ds.dataset.hop_length + + mel, mel_lengths = self.audio_to_melspec_precessor(y, y_lengths) + y_hat_mel, y_hat_mel_lengths = self.audio_to_melspec_precessor(y_hat, y_hat_lengths) + + # plot audio once per epoch + if batch_idx == 0: + logger = self.logger.experiment + # print(logger, self.logger) + if logger is not None and isinstance(self.logger, WandbLogger): + specs = [] + audios = [] + + specs += [ + wandb.Image( + plot_spectrogram_to_numpy(mel[0, :, : mel_lengths[0]].cpu().numpy()), caption=f"val_mel_target", + ), + wandb.Image( + plot_spectrogram_to_numpy(y_hat_mel[0, :, : y_hat_mel_lengths[0]].cpu().numpy()), + caption=f"val_mel_predicted", + ), + ] + + audios += [ + wandb.Audio( + y[0, : y_lengths[0]].data.cpu().to(torch.float).numpy(), + caption=f"val_wav_target", + sample_rate=self.sample_rate, + ), + wandb.Audio( + y_hat[0, : y_hat_lengths[0]].data.cpu().to(torch.float).numpy(), + caption=f"val_wav_predicted", + sample_rate=self.sample_rate, + ), + ] + + logger.log({"specs": specs, "audios": audios}) + + def _loader(self, cfg): + try: + # _ = cfg.model.train_ds.manifest_filepath + _ = cfg['dataset']['manifest_filepath'] + except omegaconf.errors.MissingMandatoryValue: + logging.warning("manifest_filepath was skipped. No dataset for this model.") + return None + + dataset = instantiate( + cfg.dataset, + text_normalizer=self.normalizer, + text_normalizer_call_kwargs=self.text_normalizer_call_kwargs, + text_tokenizer=self.tokenizer, + ) + return torch.utils.data.DataLoader( # noqa + dataset=dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params, + ) + + def train_dataloader(self): + # default used by the Trainer + dataset = instantiate( + self.cfg.train_ds.dataset, + text_normalizer=self.normalizer, + text_normalizer_call_kwargs=self.text_normalizer_call_kwargs, + text_tokenizer=self.tokenizer, + ) + + train_sampler = DistributedBucketSampler( + dataset, + self.cfg.train_ds.batch_sampler.batch_size, + [32,300,400,500,600,700,800,900,1000], + shuffle=True) + dataloader = torch.utils.data.DataLoader(dataset, collate_fn=dataset.collate_fn, batch_sampler=train_sampler, + **self.cfg.train_ds.dataloader_params,) + return dataloader + + def setup_training_data(self, cfg): + self._train_dl = self._loader(cfg) + + def setup_validation_data(self, cfg): + self._validation_dl = self._loader(cfg) + + def setup_test_data(self, cfg): + """Omitted.""" + pass + + @classmethod + def list_available_models(cls) -> 'List[PretrainedModelInfo]': + list_of_models = [] + # TODO: List available models?? 
+ return list_of_models + + def convert_text_to_waveform(self, *, tokens): + # TODO: Convert text to waveforms + pass From 5d6fd1fb7bae93c3ae77cb0dd3cfccc6d4158410 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Mon, 22 Aug 2022 06:46:14 -0700 Subject: [PATCH 132/244] bugfix --- nemo/collections/tts/models/vits_test.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/nemo/collections/tts/models/vits_test.py b/nemo/collections/tts/models/vits_test.py index 707ac8b3ee17..048c50dc1358 100644 --- a/nemo/collections/tts/models/vits_test.py +++ b/nemo/collections/tts/models/vits_test.py @@ -216,11 +216,11 @@ def training_step(self, batch, batch_idx, optimizer_idx): # train discriminator if optimizer_idx == 0: # with autocast(enabled=True): - # with torch.no_grad(): - y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = self.net_g( - x, x_lengths, spec, spec_lengths - ) - print(y_hat.requires_grad) + with torch.no_grad(): + y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = self.net_g( + x, x_lengths, spec, spec_lengths + ) + self.stash = y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) y = torch.unsqueeze(y, 1) y = slice_segments(y, ids_slice * self.cfg.n_window_stride, self._cfg.segment_size) @@ -267,8 +267,10 @@ def training_step(self, batch, batch_idx, optimizer_idx): self._cfg.mel_fmax, ) - y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = self.stash - print(y_hat.requires_grad) + y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = self.net_g( + x, x_lengths, spec, spec_lengths + ) + y = torch.unsqueeze(y, 1) y = slice_segments(y, ids_slice * self.cfg.n_window_stride, self._cfg.segment_size) y_mel = slice_segments(mel, ids_slice, self._cfg.segment_size // self.cfg.n_window_stride) From 44be3adf0f1ea6e3697f1fa310256e107e90e22f Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Tue, 23 Aug 2022 02:50:47 -0700 Subject: [PATCH 133/244] reverted --- examples/tts/vits.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/tts/vits.py b/examples/tts/vits.py index e5c8b56a17c3..57abf017da7a 100644 --- a/examples/tts/vits.py +++ b/examples/tts/vits.py @@ -15,7 +15,7 @@ import pytorch_lightning as pl from nemo.collections.common.callbacks import LogEpochTimeCallback -from nemo.collections.tts.models.vits_test import VitsModel +from nemo.collections.tts.models.vits import VitsModel from nemo.core.config import hydra_runner from nemo.utils.exp_manager import exp_manager From 1d37c1e951bf69c98dacfe2086641e7d8b447c36 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Mon, 29 Aug 2022 06:32:21 -0700 Subject: [PATCH 134/244] fixed sampler behaviour --- examples/tts/vits.py | 3 ++- nemo/collections/tts/helpers/helpers.py | 5 +++-- nemo/collections/tts/models/vits.py | 9 ++++++++- 3 files changed, 13 insertions(+), 4 deletions(-) diff --git a/examples/tts/vits.py b/examples/tts/vits.py index 57abf017da7a..45be264faecf 100644 --- a/examples/tts/vits.py +++ b/examples/tts/vits.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from pickle import FALSE import pytorch_lightning as pl from nemo.collections.common.callbacks import LogEpochTimeCallback @@ -28,7 +29,7 @@ def main(cfg): # plugins.append(NativeMixedPrecisionPlugin(precision=cfg.trainer.precision, device='cuda', scaler=scaler)) trainer = pl.Trainer(replace_sampler_ddp=False, **cfg.trainer) - # trainer = pl.Trainer(plugins=plugins, **cfg.trainer) + # trainer = pl.Trainer(**cfg.trainer) exp_manager(trainer, cfg.get("exp_manager", None)) model = VitsModel(cfg=cfg.model, trainer=trainer) diff --git a/nemo/collections/tts/helpers/helpers.py b/nemo/collections/tts/helpers/helpers.py index 760f6804a773..d4809c81ce72 100644 --- a/nemo/collections/tts/helpers/helpers.py +++ b/nemo/collections/tts/helpers/helpers.py @@ -594,8 +594,9 @@ def _create_buckets(self): def __iter__(self): # deterministically shuffle based on epoch g = torch.Generator() - g.manual_seed(self.epoch) - + s = torch.randint(0, 30, (1,)) + g.manual_seed(int(s) + self.epoch) + print('Epoch:', s + self.epoch) indices = [] if self.shuffle: for bucket in self.buckets: diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index ed3a595000d0..6fab8cdc4487 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -212,7 +212,8 @@ def training_step(self, batch, batch_idx): optim_g, optim_d = self.optimizers() (y, y_lengths, x, x_lengths) = batch - + # if batch_idx == 0: + # print('tokens:', x[0][:10]) spec = self.get_spec(y) spec_lengths = self.audio_to_melspec_precessor.get_seq_len(y_lengths) @@ -357,6 +358,12 @@ def validation_step(self, batch, batch_idx): ] logger.log({"specs": specs, "audios": audios}) + + + # def on_train_epoch_start(self): + # print(self.current_epoch, self.val_dataloader) + # self.trainer.train_dataloader.sampler.set_epoch(self.current_epoch) + def _loader(self, cfg): try: From 11c9828281cc6c763166cc08684815dea3721200 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Tue, 30 Aug 2022 04:53:01 -0700 Subject: [PATCH 135/244] updated for ptl 1.7.2 --- examples/tts/conf/vits.yaml | 3 +-- nemo/collections/tts/helpers/helpers.py | 18 ++++++++++++++---- requirements/requirements_lightning.txt | 2 +- 3 files changed, 16 insertions(+), 7 deletions(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index a39f2f845150..7debebfb059d 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -208,10 +208,9 @@ trainer: max_epochs: 1000000 accumulate_grad_batches: 1 # gradient_clip_val: 1000.0 - checkpoint_callback: false # Provided by exp_manager + enable_checkpointing: false # Provided by exp_manager logger: false # Provided by exp_manager log_every_n_steps: 50 - flush_logs_every_n_steps: 1000 check_val_every_n_epoch: 1 exp_manager: diff --git a/nemo/collections/tts/helpers/helpers.py b/nemo/collections/tts/helpers/helpers.py index d4809c81ce72..fbddf7a34141 100644 --- a/nemo/collections/tts/helpers/helpers.py +++ b/nemo/collections/tts/helpers/helpers.py @@ -594,9 +594,8 @@ def _create_buckets(self): def __iter__(self): # deterministically shuffle based on epoch g = torch.Generator() - s = torch.randint(0, 30, (1,)) - g.manual_seed(int(s) + self.epoch) - print('Epoch:', s + self.epoch) + g.manual_seed(self.epoch) + print('Epoch:', self.epoch) indices = [] if self.shuffle: for bucket in self.buckets: @@ -648,4 +647,15 @@ def _bisect(self, x, lo=0, hi=None): return -1 def __len__(self): - return self.num_samples // self.batch_size \ No newline at end of file + return 
self.num_samples // self.batch_size + + def set_epoch(self, epoch: int) -> None: + """ + Sets the epoch for this sampler. When :attr:`shuffle=True`, this ensures all replicas + use a different random ordering for each epoch. Otherwise, the next iteration of this + sampler will yield the same ordering. + Args: + epoch (int): Epoch number. + """ + print(f'Set epoch{epoch}') + self.epoch = epoch \ No newline at end of file diff --git a/requirements/requirements_lightning.txt b/requirements/requirements_lightning.txt index b29ef23ec915..36202771a588 100644 --- a/requirements/requirements_lightning.txt +++ b/requirements/requirements_lightning.txt @@ -1,4 +1,4 @@ -pytorch-lightning==1.6.1 +pytorch-lightning==1.7.2 torchmetrics>=0.4.1rc0 transformers>=4.0.1 webdataset>=0.1.48,<=0.1.62 From 7b6b95ce5fc5c3265c1fe770a85313f985823324 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Mon, 5 Sep 2022 06:29:13 -0700 Subject: [PATCH 136/244] refactored dataloader func --- nemo/collections/tts/models/vits.py | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 6fab8cdc4487..e840ff92bbe5 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -287,7 +287,7 @@ def training_step(self, batch, batch_idx): optim_g.step() schedulers = self.lr_schedulers() - if schedulers is not None:# and self.trainer.is_last_batch: + if schedulers is not None and self.trainer.is_last_batch: sch1, sch2 = schedulers sch1.step() sch2.step() @@ -318,7 +318,6 @@ def training_step(self, batch, batch_idx): def validation_step(self, batch, batch_idx): (y, y_lengths, x, x_lengths) = batch - # TODO: fix hardcode y_hat, attn, mask, *_ = self.net_g.infer(x, x_lengths, max_len=1000) y_hat = y_hat.squeeze() y_hat_lengths = mask.sum([1, 2]).long() * self._cfg.train_ds.dataset.hop_length @@ -358,16 +357,9 @@ def validation_step(self, batch, batch_idx): ] logger.log({"specs": specs, "audios": audios}) - - - # def on_train_epoch_start(self): - # print(self.current_epoch, self.val_dataloader) - # self.trainer.train_dataloader.sampler.set_epoch(self.current_epoch) - def _loader(self, cfg): try: - # _ = cfg.model.train_ds.manifest_filepath _ = cfg['dataset']['manifest_filepath'] except omegaconf.errors.MissingMandatoryValue: logging.warning("manifest_filepath was skipped. 
No dataset for this model.") @@ -395,11 +387,11 @@ def train_dataloader(self): train_sampler = DistributedBucketSampler( dataset, self.cfg.train_ds.batch_sampler.batch_size, - [32,300,400,500,600,700,800,900,1000], - shuffle=True) + self.cfg.train_ds.batch_sampler.boundaries, + shuffle=self.cfg.train_ds.batch_sampler.shuffle) + dataloader = torch.utils.data.DataLoader(dataset, collate_fn=dataset.collate_fn, batch_sampler=train_sampler, **self.cfg.train_ds.dataloader_params,) - print('made ddp loader') return dataloader def setup_training_data(self, cfg): From e30f91e64a0c528a78f6464e55a1b7b311063541 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Tue, 6 Sep 2022 05:28:36 -0700 Subject: [PATCH 137/244] some cleaning --- nemo/collections/tts/helpers/helpers.py | 1 - nemo/collections/tts/models/vits.py | 31 +++++++++---------------- 2 files changed, 11 insertions(+), 21 deletions(-) diff --git a/nemo/collections/tts/helpers/helpers.py b/nemo/collections/tts/helpers/helpers.py index fbddf7a34141..49659061f316 100644 --- a/nemo/collections/tts/helpers/helpers.py +++ b/nemo/collections/tts/helpers/helpers.py @@ -657,5 +657,4 @@ def set_epoch(self, epoch: int) -> None: Args: epoch (int): Epoch number. """ - print(f'Set epoch{epoch}') self.epoch = epoch \ No newline at end of file diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index e840ff92bbe5..d566badf89a3 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -173,17 +173,15 @@ def configure_optimizers(self): wu_ratio = 0.02 wu_steps = 16000 - scheduler_g = CosineAnnealing(optimizer=optim_g, max_steps=max_steps, min_lr=min_lr, warmup_steps=wu_steps,) - scheduler_d = CosineAnnealing(optimizer=optim_d, max_steps=max_steps, min_lr=min_lr)#, warmup_steps=1000,) - - # scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=self._cfg.lr_decay) - # scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=self._cfg.lr_decay) - scheduler_g_dict = { - 'scheduler': scheduler_g, - 'interval': 'step', - } + # scheduler_g = CosineAnnealing(optimizer=optim_g, max_steps=max_steps, min_lr=min_lr, warmup_steps=wu_steps,) + # scheduler_d = CosineAnnealing(optimizer=optim_d, max_steps=max_steps, min_lr=min_lr)#, warmup_steps=1000,) + + scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=self._cfg.lr_decay) + scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=self._cfg.lr_decay) + scheduler_g_dict = {'scheduler': scheduler_g, 'interval': 'step'} scheduler_d_dict = {'scheduler': scheduler_d, 'interval': 'step'} + return [optim_g, optim_d], [scheduler_g_dict, scheduler_d_dict] # only for inference @@ -212,8 +210,7 @@ def training_step(self, batch, batch_idx): optim_g, optim_d = self.optimizers() (y, y_lengths, x, x_lengths) = batch - # if batch_idx == 0: - # print('tokens:', x[0][:10]) + spec = self.get_spec(y) spec_lengths = self.audio_to_melspec_precessor.get_seq_len(y_lengths) @@ -246,19 +243,17 @@ def training_step(self, batch, batch_idx): y = torch.unsqueeze(y, 1) y = slice_segments(y, ids_slice * self.cfg.n_window_stride, self._cfg.segment_size) + # with autocast(enabled=True): - y_d_hat_r, y_d_hat_g, _, _ = self.net_d(y, y_hat.detach()) # with autocast(enabled=False): - # loss_disc_real, loss_disc_gen, losses_disc_r, losses_disc_g = self.disc_loss(disc_real_outputs=y_d_hat_r, loss_disc, losses_disc_r, losses_disc_g = self.disc_loss(disc_real_outputs=y_d_hat_r, disc_generated_outputs=y_d_hat_g) - # loss_disc_all = 
torch.max(loss_disc_real, loss_disc_gen) loss_disc_all = loss_disc - # if self.global_step <= 180000: - # train discriminator + + # train discriminator optim_d.zero_grad() self.manual_backward(loss_disc_all) norm_d = clip_grad_value_(self.net_d.parameters(), None) @@ -275,10 +270,6 @@ def training_step(self, batch, batch_idx): loss_gen, losses_gen = self.gen_loss(disc_outputs=y_d_hat_g) loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl - # if loss_gen > loss_disc: - # loss_gen_all = loss_fm + loss_mel + loss_dur + loss_kl + loss_gen - # else: - # loss_gen_all = loss_fm + loss_mel + loss_dur + loss_kl # train generator optim_g.zero_grad() From 170c76b7c292cb5f3ae100ab5868989c45d6b280 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Tue, 6 Sep 2022 05:28:51 -0700 Subject: [PATCH 138/244] reverted to vanilla loss --- nemo/collections/tts/losses/vits_losses.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nemo/collections/tts/losses/vits_losses.py b/nemo/collections/tts/losses/vits_losses.py index 66abdc48b34c..fbae9f11ba1b 100644 --- a/nemo/collections/tts/losses/vits_losses.py +++ b/nemo/collections/tts/losses/vits_losses.py @@ -143,11 +143,11 @@ def forward(self, disc_real_outputs, disc_generated_outputs): g_loss = torch.mean(dg ** 2) # real_loss += r_loss # gen_loss += g_loss - if i == 0: - loss += torch.max(r_loss, g_loss) * 0.5 - else: - loss += torch.max(r_loss, g_loss) - # loss += r_loss + g_loss + # if i == 0: + # loss += torch.max(r_loss, g_loss) * 0.5 + # else: + # loss += torch.max(r_loss, g_loss) + loss += r_loss + g_loss r_losses.append(r_loss.item()) g_losses.append(g_loss.item()) From c4e537ee22f2b3b4a0e47e7220418c4b96992bb4 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Tue, 13 Sep 2022 12:26:37 -0700 Subject: [PATCH 139/244] modified for pickling --- nemo/collections/tts/helpers/helpers.py | 27 +++++++++--------- nemo/collections/tts/torch/data.py | 29 +++++++++++++------- nemo/collections/tts/torch/tts_tokenizers.py | 3 +- 3 files changed, 34 insertions(+), 25 deletions(-) diff --git a/nemo/collections/tts/helpers/helpers.py b/nemo/collections/tts/helpers/helpers.py index 49659061f316..515cc4b6d886 100644 --- a/nemo/collections/tts/helpers/helpers.py +++ b/nemo/collections/tts/helpers/helpers.py @@ -565,11 +565,11 @@ def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None self.lengths = dataset.lengths self.batch_size = batch_size self.boundaries = boundaries - + self.buckets, self.num_samples_per_bucket = self._create_buckets() self.total_size = sum(self.num_samples_per_bucket) self.num_samples = self.total_size // self.num_replicas - + def _create_buckets(self): buckets = [[] for _ in range(len(self.boundaries) - 1)] for i in range(len(self.lengths)): @@ -577,12 +577,12 @@ def _create_buckets(self): idx_bucket = self._bisect(length) if idx_bucket != -1: buckets[idx_bucket].append(i) - + for i in range(len(buckets) - 1, 0, -1): if len(buckets[i]) == 0: buckets.pop(i) self.boundaries.pop(i+1) - + num_samples_per_bucket = [] for i in range(len(buckets)): len_bucket = len(buckets[i]) @@ -590,12 +590,11 @@ def _create_buckets(self): rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size num_samples_per_bucket.append(len_bucket + rem) return buckets, num_samples_per_bucket - + def __iter__(self): # deterministically shuffle based on epoch g = torch.Generator() g.manual_seed(self.epoch) - print('Epoch:', self.epoch) indices = [] if self.shuffle: for bucket in self.buckets: 
@@ -603,38 +602,38 @@ def __iter__(self): else: for bucket in self.buckets: indices.append(list(range(len(bucket)))) - + batches = [] for i in range(len(self.buckets)): bucket = self.buckets[i] len_bucket = len(bucket) ids_bucket = indices[i] num_samples_bucket = self.num_samples_per_bucket[i] - + # add extra samples to make it evenly divisible rem = num_samples_bucket - len_bucket ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)] - + # subsample ids_bucket = ids_bucket[self.rank::self.num_replicas] - + # batching for j in range(len(ids_bucket) // self.batch_size): batch = [bucket[idx] for idx in ids_bucket[j*self.batch_size:(j+1)*self.batch_size]] batches.append(batch) - + if self.shuffle: batch_ids = torch.randperm(len(batches), generator=g).tolist() batches = [batches[i] for i in batch_ids] self.batches = batches - + assert len(self.batches) * self.batch_size == self.num_samples return iter(self.batches) - + def _bisect(self, x, lo=0, hi=None): if hi is None: hi = len(self.boundaries) - 1 - + if hi > lo: mid = (hi + lo) // 2 if self.boundaries[mid] < x and x <= self.boundaries[mid+1]: diff --git a/nemo/collections/tts/torch/data.py b/nemo/collections/tts/torch/data.py index 8c0416855561..c5ab41602a60 100644 --- a/nemo/collections/tts/torch/data.py +++ b/nemo/collections/tts/torch/data.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. - +import functools import json import math import os @@ -218,6 +218,9 @@ def __init__( self.data = TTSDataset.filter_files(data, ignore_file, min_duration, max_duration, total_duration) self.base_data_dir = get_base_dir([item["audio_filepath"] for item in self.data]) + random.seed(1234) + random.shuffle(self.data) + self.add_blank = add_blank # Initialize audio and mel related parameters self.sample_rate = sample_rate @@ -247,14 +250,14 @@ def __init__( 'none': None, }.get(self.window, None) - self.stft = lambda x: torch.stft( - input=x, - n_fft=self.n_fft, - hop_length=self.hop_len, - win_length=self.win_length, - window=window_fn(self.win_length, periodic=False).to(torch.float) if window_fn else None, - return_complex=True, - ) + # self.stft = lambda x: torch.stft( + # input=x, + # n_fft=self.n_fft, + # hop_length=self.hop_len, + # win_length=self.win_length, + # window=window_fn(self.win_length, periodic=False).to(torch.float) if window_fn else None, + # return_complex=True, + # ) # Initialize sup_data_path, sup_data_types and run preprocessing methods for every supplementary data type if sup_data_path is not None: @@ -382,7 +385,13 @@ def add_speaker_id(self, **kwargs): def get_spec(self, audio): with torch.cuda.amp.autocast(enabled=False): - spec = self.stft(audio) + spec = torch.stft(audio, + n_fft=self.n_fft, + hop_length=self.hop_len, + win_length=self.win_length, + window=torch.hann_window(self.win_length, periodic=False).to(torch.float), + return_complex=True) + if spec.dtype in [torch.cfloat, torch.cdouble]: spec = torch.view_as_real(spec) spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-9) diff --git a/nemo/collections/tts/torch/tts_tokenizers.py b/nemo/collections/tts/torch/tts_tokenizers.py index 1df5649db4e1..a5c15e0b2c4d 100644 --- a/nemo/collections/tts/torch/tts_tokenizers.py +++ b/nemo/collections/tts/torch/tts_tokenizers.py @@ -17,6 +17,7 @@ import string from contextlib import contextmanager from typing import List +import functools import re @@ -397,7 +398,7 @@ def __init__( sep='|', # To be able to distinguish between 2/3 
letters codes.
         add_blank_at=None,
         pad_with_space=False,
-        text_preprocessing_func=lambda text: english_text_preprocessing(text, lower=False),
+        text_preprocessing_func=functools.partial(english_text_preprocessing, lower=False)
     ):
         """English phoneme-based tokenizer.
         Args:

From 7934c5b9898c1fc2f9acfd85f96f8e85d5dde731 Mon Sep 17 00:00:00 2001
From: Evgeniy Shabalin
Date: Sat, 17 Sep 2022 06:56:27 -0700
Subject: [PATCH 140/244] added dataset class

---
 nemo/collections/tts/torch/data.py | 465 ++++++++++++++++++++++++++++-
 1 file changed, 451 insertions(+), 14 deletions(-)

diff --git a/nemo/collections/tts/torch/data.py b/nemo/collections/tts/torch/data.py
index c5ab41602a60..79e60ec76caf 100644
--- a/nemo/collections/tts/torch/data.py
+++ b/nemo/collections/tts/torch/data.py
@@ -250,14 +250,14 @@ def __init__(
             'none': None,
         }.get(self.window, None)
 
-        # self.stft = lambda x: torch.stft(
-        #     input=x,
-        #     n_fft=self.n_fft,
-        #     hop_length=self.hop_len,
-        #     win_length=self.win_length,
-        #     window=window_fn(self.win_length, periodic=False).to(torch.float) if window_fn else None,
-        #     return_complex=True,
-        # )
+        self.stft = lambda x: torch.stft(
+            input=x,
+            n_fft=self.n_fft,
+            hop_length=self.hop_len,
+            win_length=self.win_length,
+            window=window_fn(self.win_length, periodic=False).to(torch.float) if window_fn else None,
+            return_complex=True,
+        )
 
         # Initialize sup_data_path, sup_data_types and run preprocessing methods for every supplementary data type
         if sup_data_path is not None:
@@ -385,12 +385,7 @@ def add_speaker_id(self, **kwargs):
 
     def get_spec(self, audio):
         with torch.cuda.amp.autocast(enabled=False):
-            spec = torch.stft(audio,
-                              n_fft=self.n_fft,
-                              hop_length=self.hop_len,
-                              win_length=self.win_length,
-                              window=torch.hann_window(self.win_length, periodic=False).to(torch.float),
-                              return_complex=True)
-
+            spec = self.stft(audio)
 
             if spec.dtype in [torch.cfloat, torch.cdouble]:
                 spec = torch.view_as_real(spec)
@@ -923,3 +918,445 @@ def __getitem__(self, index):
 
     def __len__(self):
         return len(self.data)
+
+class VitsDataset(Dataset):
+    def __init__(
+        self,
+        manifest_filepath: Union[str, Path, List[str], List[Path]],
+        sample_rate: int,
+        text_tokenizer: Union[BaseTokenizer, Callable[[str], List[int]]],
+        tokens: Optional[List[str]] = None,
+        text_normalizer: Optional[Union[Normalizer, Callable[[str], str]]] = None,
+        text_normalizer_call_kwargs: Optional[Dict] = None,
+        text_tokenizer_pad_id: Optional[int] = None,
+        sup_data_types: Optional[List[str]] = None,
+        sup_data_path: Optional[Union[Path, str]] = None,
+        max_duration: Optional[float] = None,
+        min_duration: Optional[float] = 0.1,
+        ignore_file: Optional[Union[str, Path]] = None,
+        trim: bool = False,
+        n_fft: int = 1024,
+        win_length: Optional[int] = 1024,
+        hop_length: Optional[int] = 256,
+        n_mels: int = 80,
+        add_blank=True,
+        **kwargs,
+    ):
+        """Dataset which can be used for training spectrogram generators and end-to-end TTS models.
+        It loads main data types (audio, text) and specified supplementary data types (log mel, durations, align prior matrix, pitch, energy, speaker id).
+        Some of the supplementary data types will be computed on the fly and saved in the sup_data_path if they do not exist there yet.
+        The save folder can be changed for some supplementary data types (see keyword args section).
+        Arguments for supplementary data should also be specified in this class; they will be picked up from kwargs (see keyword args section).
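+        A single manifest line could look like this (illustrative values only):
+            {"audio_filepath": "wavs/sample_0001.wav", "text": "Hello world.", "duration": 1.25}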
+        Args:
+            manifest_filepath (Union[str, Path, List[str], List[Path]]): Path(s) to the .json manifests containing information on the
+                dataset. Each line in the .json file should be valid json. Note: the .json file itself is not valid
+                json. Each line should contain the following:
+                    "audio_filepath": <PATH_TO_WAV>,
+                    "text": <THE_TRANSCRIPT>,
+                    "normalized_text": <NORMALIZED_TRANSCRIPT> (Optional),
+                    "mel_filepath": <PATH_TO_LOG_MEL_PT> (Optional),
+                    "duration": <Duration of audio clip in seconds> (Optional)
+            sample_rate (int): The sample rate of the audio. Or the sample rate that we will resample all files to.
+            text_tokenizer (Optional[Union[BaseTokenizer, Callable[[str], List[int]]]]): BaseTokenizer or callable which represents text tokenizer.
+            tokens (Optional[List[str]]): Tokens from text_tokenizer. Should be specified if text_tokenizer is not BaseTokenizer.
+            text_normalizer (Optional[Union[Normalizer, Callable[[str], str]]]): Normalizer or callable which represents text normalizer.
+            text_normalizer_call_kwargs (Optional[Dict]): Additional arguments for text_normalizer function.
+            text_tokenizer_pad_id (Optional[int]): Index of padding. Should be specified if text_tokenizer is not BaseTokenizer.
+            sup_data_types (Optional[List[str]]): List of supplementary data types.
+            sup_data_path (Optional[Union[Path, str]]): A folder that contains or will contain supplementary data (e.g. pitch).
+            max_duration (Optional[float]): Max duration of audio clips in seconds. All samples exceeding this will be
+                pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load
+                audio to compute duration. Defaults to None which does not prune.
+            min_duration (Optional[float]): Min duration of audio clips in seconds. All samples lower than this will be
+                pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load
+                audio to compute duration. Defaults to None which does not prune.
+            ignore_file (Optional[Union[str, Path]]): The location of a pickle-saved list of audio paths
+                that will be pruned prior to training. Defaults to None which does not prune.
+            trim (Optional[bool]): Whether to apply librosa.effects.trim to the audio file. Defaults to False.
+            n_fft (int): The number of fft samples. Defaults to 1024.
+            win_length (Optional[int]): The length of the stft windows. Defaults to 1024.
+            hop_length (Optional[int]): The hop length between fft computations. Defaults to 256.
+            window (str): One of 'hann', 'hamming', 'blackman', 'bartlett', 'none'. Which corresponds to the
+                equivalent torch window function.
+            n_mels (int): The number of mel filters. Defaults to 80.
+            lowfreq (int): The lowfreq input to the mel filter calculation. Defaults to 0.
+            highfreq (Optional[int]): The highfreq input to the mel filter calculation. Defaults to None.
+        Keyword Args:
+            log_mel_folder (Optional[Union[Path, str]]): The folder that contains or will contain log mel spectrograms.
+            align_prior_matrix_folder (Optional[Union[Path, str]]): The folder that contains or will contain align prior matrices.
+            pitch_folder (Optional[Union[Path, str]]): The folder that contains or will contain pitch.
+            energy_folder (Optional[Union[Path, str]]): The folder that contains or will contain energy.
+            durs_file (Optional[str]): String path to pickled durations location.
+            durs_type (Optional[str]): Type of durations. Currently only "aligner-based" is supported.
+            use_beta_binomial_interpolator (Optional[bool]): Whether to use beta-binomial interpolator for calculating alignment prior matrix. Defaults to False.
+            pitch_fmin (Optional[float]): The fmin input to librosa.pyin. Defaults to librosa.note_to_hz('C2').
+            pitch_fmax (Optional[float]): The fmax input to librosa.pyin. Defaults to librosa.note_to_hz('C7').
+            pitch_mean (Optional[float]): The mean that we use to normalize the pitch.
+            pitch_std (Optional[float]): The std that we use to normalize the pitch.
+            pitch_norm (Optional[bool]): Whether to normalize pitch (via pitch_mean and pitch_std) or not.
+        """
+        super().__init__()
+
+        # Initialize text tokenizer
+        self.text_tokenizer = text_tokenizer
+
+        self.phoneme_probability = None
+        if isinstance(self.text_tokenizer, IPAPhonemesTokenizer):
+            self.text_tokenizer_pad_id = text_tokenizer.pad
+            self.tokens = text_tokenizer.tokens
+            self.phoneme_probability = getattr(self.text_tokenizer, "phoneme_probability", None)
+        else:
+            if text_tokenizer_pad_id is None:
+                raise ValueError(f"text_tokenizer_pad_id must be specified if text_tokenizer is not BaseTokenizer")
+
+            if tokens is None:
+                raise ValueError(f"tokens must be specified if text_tokenizer is not BaseTokenizer")
+
+            self.text_tokenizer_pad_id = text_tokenizer_pad_id
+            self.tokens = tokens
+        self.cache_text = True if self.phoneme_probability is None else False
+
+        # Initialize text normalizer if specified
+        self.text_normalizer = text_normalizer
+        self.text_normalizer_call = (
+            self.text_normalizer.normalize if isinstance(self.text_normalizer, Normalizer) else self.text_normalizer
+        )
+        self.text_normalizer_call_kwargs = (
+            text_normalizer_call_kwargs if text_normalizer_call_kwargs is not None else {}
+        )
+
+        # Initialize and read manifest file(s), filter out data by duration and ignore_file, compute base dir
+        if isinstance(manifest_filepath, str):
+            manifest_filepath = [manifest_filepath]
+        self.manifest_filepath = manifest_filepath
+        self.lengths = []
+
+        data = []
+        total_duration = 0
+        for manifest_file in self.manifest_filepath:
+            with open(Path(manifest_file).expanduser(), 'r') as f:
+                logging.info(f"Loading dataset from {manifest_file}.")
+                for line in tqdm(f):
+                    item = json.loads(line)
+
+                    file_info = {
+                        "audio_filepath": item["audio_filepath"],
+                        "original_text": item["text"],
+                        "mel_filepath": item["mel_filepath"] if "mel_filepath" in item else None,
+                        "duration": item["duration"] if "duration" in item else None,
+                    }
+
+                    if "normalized_text" not in item:
+                        text = item["text"]
+                        if self.text_normalizer is not None:
+                            text = self.text_normalizer_call(text, **self.text_normalizer_call_kwargs)
+                        file_info["normalized_text"] = text
+                    else:
+                        file_info["normalized_text"] = item["normalized_text"]
+
+                    if self.cache_text:
+                        file_info["text_tokens"] = self.text_tokenizer(file_info["normalized_text"])
+
+                    data.append(file_info)
+                    # 16-bit mono wav: samples ~= file_size / 2, spectrogram frames ~= samples // hop_length
+                    self.lengths.append(os.path.getsize(item["audio_filepath"]) // (2 * hop_length))
+                    if file_info["duration"] is None:
+                        logging.info(
+                            "Not all audio files have duration information. Duration logging will be disabled."
+                        )
+                        total_duration = None
+
+                    if total_duration is not None:
+                        total_duration += item["duration"]
+
+        logging.info(f"Loaded dataset with {len(data)} files.")
+        if total_duration is not None:
+            logging.info(f"Dataset contains {total_duration / 3600:.2f} hours.")
+
+        self.data = VitsDataset.filter_files(data, ignore_file, min_duration, max_duration, total_duration)
+        self.base_data_dir = get_base_dir([item["audio_filepath"] for item in self.data])
+
+        random.seed(1234)
+        random.shuffle(self.data)
+
+        self.add_blank = add_blank
+        # Initialize audio and mel related parameters
+        self.sample_rate = sample_rate
+        self.featurizer = WaveformFeaturizer(sample_rate=self.sample_rate)
+        self.trim = trim
+
+        self.n_fft = n_fft
+        self.n_mels = n_mels
+        # stft parameters; get_spec below relies on these attributes
+        self.win_length = win_length
+        self.hop_len = hop_length
+
+    @staticmethod
+    def filter_files(data, ignore_file, min_duration, max_duration, total_duration):
+        if ignore_file:
+            logging.info(f"Using {ignore_file} to prune dataset.")
+            with open(Path(ignore_file).expanduser(), "rb") as f:
+                wavs_to_ignore = set(pickle.load(f))
+
+        filtered_data: List[Dict] = []
+        pruned_duration = 0 if total_duration is not None else None
+        pruned_items = 0
+        for item in data:
+            audio_path = item['audio_filepath']
+
+            # Prune data according to min/max_duration & the ignore file
+            if total_duration is not None:
+                if (min_duration and item["duration"] < min_duration) or (
+                    max_duration and item["duration"] > max_duration
+                ):
+                    pruned_duration += item["duration"]
+                    pruned_items += 1
+                    continue
+
+            if ignore_file and (audio_path in wavs_to_ignore):
+                pruned_items += 1
+                if pruned_duration is not None:
+                    pruned_duration += item["duration"]
+                wavs_to_ignore.remove(audio_path)
+                continue
+
+            filtered_data.append(item)
+
+        logging.info(f"Pruned {pruned_items} files. Final dataset contains {len(filtered_data)} files.")
+        if pruned_duration is not None:
+            logging.info(
+                f"Pruned {pruned_duration / 3600:.2f} hours. Final dataset contains "
+                f"{(total_duration - pruned_duration) / 3600:.2f} hours."
+            )
+
+        return filtered_data
+
+    def get_spec(self, audio):
+        with torch.cuda.amp.autocast(enabled=False):
+            spec = torch.stft(audio,
+                              n_fft=self.n_fft,
+                              hop_length=self.hop_len,
+                              win_length=self.win_length,
+                              window=torch.hann_window(self.win_length, periodic=False).to(torch.float),
+                              return_complex=True)
+
+        if spec.dtype in [torch.cfloat, torch.cdouble]:
+            spec = torch.view_as_real(spec)
+        spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-9)
+        return spec
+
+    @staticmethod
+    def intersperse(lst, item):
+        # blank insertion: put `item` between every two tokens and at both ends
+        result = [item] * (len(lst) * 2 + 1)
+        result[1::2] = lst
+        return result
+
+    def __getitem__(self, index):
+        sample = self.data[index]
+
+        # Load audio
+        features = self.featurizer.process(sample["audio_filepath"], trim=self.trim)
+        audio, audio_length = features, torch.tensor(features.shape[0]).long()
+
+        tokenized = self.text_tokenizer(sample["normalized_text"])
+        tokenized = self.intersperse(tokenized, 0)
+        text = torch.tensor(tokenized).long()
+        text_length = torch.tensor(len(tokenized)).long()
+
+        return (
+            audio,
+            audio_length,
+            text,
+            text_length,
+        )
+
+    def __len__(self):
+        return len(self.data)
+
+    def join_data(self, data_dict):
+        result = []
+        for data_type in MAIN_DATA_TYPES:
+            result.append(data_dict[data_type.name])
+
+            if issubclass(data_type, TTSDataType) and issubclass(data_type, WithLens):
+                result.append(data_dict[f"{data_type.name}_lens"])
+
+        return tuple(result)
+
+    def general_collate_fn(self, batch):
+        (
+            _,
+            audio_lengths,
+            _,
+            tokens_lengths,
+        ) = zip(*batch)
+
+        max_audio_len = max(audio_lengths).item()
+        max_tokens_len = max(tokens_lengths).item()
+
+        audios, tokens = [], []
+
+        for i, sample_tuple in enumerate(batch):
+            (
+                audio,
+                audio_len,
+                token,
+                token_len,
+            ) = sample_tuple
+
+            audio = general_padding(audio, audio_len.item(), max_audio_len)
+            audios.append(audio)
+
+            token = general_padding(token, token_len.item(), max_tokens_len, pad_value=self.text_tokenizer_pad_id)
+            tokens.append(token)
+
+        data_dict = {
+            "audio": torch.stack(audios),
+            "audio_lens": torch.stack(audio_lengths),
+            "text": torch.stack(tokens),
+            "text_lens": torch.stack(tokens_lengths),
+        }
+
+        return data_dict
+
+    def _collate_fn(self, batch):
+        data_dict = self.general_collate_fn(batch)
+        joined_data = self.join_data(data_dict)
+        return joined_data
+
+
+class TextAudioLoader(torch.utils.data.Dataset):
+    """
+        1) loads audio, text pairs
+        2) normalizes text and converts them to sequences of integers
+        3) computes spectrograms from audio files.
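+
+        Note: this loader and the TextAudioCollate class below appear to be kept
+        verbatim from the reference VITS implementation; they are superseded by
+        VitsDataset above and are removed again in PATCH 142.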
+ """ + def __init__(self, audiopaths_and_text, hparams): + self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text) + self.text_cleaners = hparams.text_cleaners + self.max_wav_value = hparams.max_wav_value + self.sampling_rate = hparams.sampling_rate + self.filter_length = hparams.filter_length + self.hop_length = hparams.hop_length + self.win_length = hparams.win_length + self.sampling_rate = hparams.sampling_rate + + self.cleaned_text = getattr(hparams, "cleaned_text", False) + + self.add_blank = hparams.add_blank + self.min_text_len = getattr(hparams, "min_text_len", 1) + self.max_text_len = getattr(hparams, "max_text_len", 190) + + random.seed(1234) + random.shuffle(self.audiopaths_and_text) + self._filter() + + + def _filter(self): + """ + Filter text & store spec lengths + """ + # Store spectrogram lengths for Bucketing + # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2) + # spec_length = wav_length // hop_length + + audiopaths_and_text_new = [] + lengths = [] + for audiopath, text in self.audiopaths_and_text: + if self.min_text_len <= len(text) and len(text) <= self.max_text_len: + audiopaths_and_text_new.append([audiopath, text]) + lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length)) + self.audiopaths_and_text = audiopaths_and_text_new + self.lengths = lengths + + def get_audio_text_pair(self, audiopath_and_text): + # separate filename and text + audiopath, text = audiopath_and_text[0], audiopath_and_text[1] + text = self.get_text(text) + spec, wav = self.get_audio(audiopath) + return (text, spec, wav) + + def get_audio(self, filename): + audio, sampling_rate = load_wav_to_torch(filename) + if sampling_rate != self.sampling_rate: + raise ValueError("{} {} SR doesn't match target {} SR".format( + sampling_rate, self.sampling_rate)) + audio_norm = audio / self.max_wav_value + audio_norm = audio_norm.unsqueeze(0) + spec_filename = filename.replace(".wav", ".spec.pt") + if os.path.exists(spec_filename): + spec = torch.load(spec_filename) + else: + spec = spectrogram_torch(audio_norm, self.filter_length, + self.sampling_rate, self.hop_length, self.win_length, + center=False) + spec = torch.squeeze(spec, 0) + torch.save(spec, spec_filename) + return spec, audio_norm + + def get_text(self, text): + if self.cleaned_text: + text_norm = cleaned_text_to_sequence(text) + else: + text_norm = text_to_sequence(text, self.text_cleaners) + if self.add_blank: + text_norm = commons.intersperse(text_norm, 0) + text_norm = torch.LongTensor(text_norm) + return text_norm + + def __getitem__(self, index): + return self.get_audio_text_pair(self.audiopaths_and_text[index]) + + def __len__(self): + return len(self.audiopaths_and_text) + + +class TextAudioCollate(): + """ Zero-pads model inputs and targets + """ + def __init__(self, return_ids=False): + self.return_ids = return_ids + + def __call__(self, batch): + """Collate's training batch from normalized text and aduio + PARAMS + ------ + batch: [text_normalized, spec_normalized, wav_normalized] + """ + # Right zero-pad all one-hot text sequences to max input length + _, ids_sorted_decreasing = torch.sort( + torch.LongTensor([x[1].size(1) for x in batch]), + dim=0, descending=True) + + max_text_len = max([len(x[0]) for x in batch]) + max_spec_len = max([x[1].size(1) for x in batch]) + max_wav_len = max([x[2].size(1) for x in batch]) + + text_lengths = torch.LongTensor(len(batch)) + spec_lengths = torch.LongTensor(len(batch)) + wav_lengths = torch.LongTensor(len(batch)) + + text_padded = 
torch.LongTensor(len(batch), max_text_len) + spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len) + wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len) + text_padded.zero_() + spec_padded.zero_() + wav_padded.zero_() + for i in range(len(ids_sorted_decreasing)): + row = batch[ids_sorted_decreasing[i]] + + text = row[0] + text_padded[i, :text.size(0)] = text + text_lengths[i] = text.size(0) + + spec = row[1] + spec_padded[i, :, :spec.size(1)] = spec + spec_lengths[i] = spec.size(1) + + wav = row[2] + wav_padded[i, :, :wav.size(1)] = wav + wav_lengths[i] = wav.size(1) + + if self.return_ids: + return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, ids_sorted_decreasing + return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths \ No newline at end of file From 68c93f96139a0c4d1a876e79578a2d073d1d8fc2 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Sun, 18 Sep 2022 10:39:35 -0700 Subject: [PATCH 141/244] fixed torch version --- requirements/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/requirements.txt b/requirements/requirements.txt index b22198fa55d8..1e470e0b0cff 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -2,7 +2,7 @@ numpy==1.21 setuptools==59.5.0 onnx>=1.7.0 python-dateutil -torch +torch>=1.12 wrapt ruamel.yaml scikit-learn From 600decbc87c88e3902db069e8f4a02a2af80e3bd Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Mon, 19 Sep 2022 09:21:46 -0700 Subject: [PATCH 142/244] added autocast for fp training --- nemo/collections/tts/models/vits.py | 40 ++++---- nemo/collections/tts/torch/data.py | 137 ---------------------------- 2 files changed, 20 insertions(+), 157 deletions(-) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index d566badf89a3..1eb955e2deb2 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -214,10 +214,10 @@ def training_step(self, batch, batch_idx): spec = self.get_spec(y) spec_lengths = self.audio_to_melspec_precessor.get_seq_len(y_lengths) - # with autocast(enabled=True): - y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = self.net_g( - x, x_lengths, spec, spec_lengths - ) + with autocast(enabled=True): + y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = self.net_g( + x, x_lengths, spec, spec_lengths + ) mel = spec_to_mel_torch( spec, @@ -244,13 +244,13 @@ def training_step(self, batch, batch_idx): y = torch.unsqueeze(y, 1) y = slice_segments(y, ids_slice * self.cfg.n_window_stride, self._cfg.segment_size) - # with autocast(enabled=True): - y_d_hat_r, y_d_hat_g, _, _ = self.net_d(y, y_hat.detach()) + with autocast(enabled=True): + y_d_hat_r, y_d_hat_g, _, _ = self.net_d(y, y_hat.detach()) - # with autocast(enabled=False): - loss_disc, losses_disc_r, losses_disc_g = self.disc_loss(disc_real_outputs=y_d_hat_r, - disc_generated_outputs=y_d_hat_g) - loss_disc_all = loss_disc + with autocast(enabled=False): + loss_disc, losses_disc_r, losses_disc_g = self.disc_loss(disc_real_outputs=y_d_hat_r, + disc_generated_outputs=y_d_hat_g) + loss_disc_all = loss_disc # train discriminator @@ -259,16 +259,16 @@ def training_step(self, batch, batch_idx): norm_d = clip_grad_value_(self.net_d.parameters(), None) optim_d.step() - # with autocast(enabled=True): - # Generator - y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = self.net_d(y, y_hat) - # with 
autocast(enabled=False): - loss_dur = torch.sum(l_length.float()) - loss_mel = F.l1_loss(y_mel, y_hat_mel) * self._cfg.c_mel - loss_kl = self.kl_loss(z_p=z_p, logs_q=logs_q, m_p=m_p, logs_p=logs_p, z_mask=z_mask) * self._cfg.c_kl - loss_fm = self.feat_matching_loss(fmap_r=fmap_r, fmap_g=fmap_g) - loss_gen, losses_gen = self.gen_loss(disc_outputs=y_d_hat_g) - loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl + with autocast(enabled=True): + y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = self.net_d(y, y_hat) + # Generator + with autocast(enabled=False): + loss_dur = torch.sum(l_length.float()) + loss_mel = F.l1_loss(y_mel, y_hat_mel) * self._cfg.c_mel + loss_kl = self.kl_loss(z_p=z_p, logs_q=logs_q, m_p=m_p, logs_p=logs_p, z_mask=z_mask) * self._cfg.c_kl + loss_fm = self.feat_matching_loss(fmap_r=fmap_r, fmap_g=fmap_g) + loss_gen, losses_gen = self.gen_loss(disc_outputs=y_d_hat_g) + loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl # train generator diff --git a/nemo/collections/tts/torch/data.py b/nemo/collections/tts/torch/data.py index 79e60ec76caf..4f5f1444cfc9 100644 --- a/nemo/collections/tts/torch/data.py +++ b/nemo/collections/tts/torch/data.py @@ -1223,140 +1223,3 @@ def _collate_fn(self, batch): data_dict = self.general_collate_fn(batch) joined_data = self.join_data(data_dict) return joined_data - - -class TextAudioLoader(torch.utils.data.Dataset): - """ - 1) loads audio, text pairs - 2) normalizes text and converts them to sequences of integers - 3) computes spectrograms from audio files. - """ - def __init__(self, audiopaths_and_text, hparams): - self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text) - self.text_cleaners = hparams.text_cleaners - self.max_wav_value = hparams.max_wav_value - self.sampling_rate = hparams.sampling_rate - self.filter_length = hparams.filter_length - self.hop_length = hparams.hop_length - self.win_length = hparams.win_length - self.sampling_rate = hparams.sampling_rate - - self.cleaned_text = getattr(hparams, "cleaned_text", False) - - self.add_blank = hparams.add_blank - self.min_text_len = getattr(hparams, "min_text_len", 1) - self.max_text_len = getattr(hparams, "max_text_len", 190) - - random.seed(1234) - random.shuffle(self.audiopaths_and_text) - self._filter() - - - def _filter(self): - """ - Filter text & store spec lengths - """ - # Store spectrogram lengths for Bucketing - # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2) - # spec_length = wav_length // hop_length - - audiopaths_and_text_new = [] - lengths = [] - for audiopath, text in self.audiopaths_and_text: - if self.min_text_len <= len(text) and len(text) <= self.max_text_len: - audiopaths_and_text_new.append([audiopath, text]) - lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length)) - self.audiopaths_and_text = audiopaths_and_text_new - self.lengths = lengths - - def get_audio_text_pair(self, audiopath_and_text): - # separate filename and text - audiopath, text = audiopath_and_text[0], audiopath_and_text[1] - text = self.get_text(text) - spec, wav = self.get_audio(audiopath) - return (text, spec, wav) - - def get_audio(self, filename): - audio, sampling_rate = load_wav_to_torch(filename) - if sampling_rate != self.sampling_rate: - raise ValueError("{} {} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate)) - audio_norm = audio / self.max_wav_value - audio_norm = audio_norm.unsqueeze(0) - spec_filename = filename.replace(".wav", ".spec.pt") - if os.path.exists(spec_filename): - 
spec = torch.load(spec_filename) - else: - spec = spectrogram_torch(audio_norm, self.filter_length, - self.sampling_rate, self.hop_length, self.win_length, - center=False) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename) - return spec, audio_norm - - def get_text(self, text): - if self.cleaned_text: - text_norm = cleaned_text_to_sequence(text) - else: - text_norm = text_to_sequence(text, self.text_cleaners) - if self.add_blank: - text_norm = commons.intersperse(text_norm, 0) - text_norm = torch.LongTensor(text_norm) - return text_norm - - def __getitem__(self, index): - return self.get_audio_text_pair(self.audiopaths_and_text[index]) - - def __len__(self): - return len(self.audiopaths_and_text) - - -class TextAudioCollate(): - """ Zero-pads model inputs and targets - """ - def __init__(self, return_ids=False): - self.return_ids = return_ids - - def __call__(self, batch): - """Collate's training batch from normalized text and aduio - PARAMS - ------ - batch: [text_normalized, spec_normalized, wav_normalized] - """ - # Right zero-pad all one-hot text sequences to max input length - _, ids_sorted_decreasing = torch.sort( - torch.LongTensor([x[1].size(1) for x in batch]), - dim=0, descending=True) - - max_text_len = max([len(x[0]) for x in batch]) - max_spec_len = max([x[1].size(1) for x in batch]) - max_wav_len = max([x[2].size(1) for x in batch]) - - text_lengths = torch.LongTensor(len(batch)) - spec_lengths = torch.LongTensor(len(batch)) - wav_lengths = torch.LongTensor(len(batch)) - - text_padded = torch.LongTensor(len(batch), max_text_len) - spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len) - wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len) - text_padded.zero_() - spec_padded.zero_() - wav_padded.zero_() - for i in range(len(ids_sorted_decreasing)): - row = batch[ids_sorted_decreasing[i]] - - text = row[0] - text_padded[i, :text.size(0)] = text - text_lengths[i] = text.size(0) - - spec = row[1] - spec_padded[i, :, :spec.size(1)] = spec - spec_lengths[i] = spec.size(1) - - wav = row[2] - wav_padded[i, :, :wav.size(1)] = wav - wav_lengths[i] = wav.size(1) - - if self.return_ids: - return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths, ids_sorted_decreasing - return text_padded, text_lengths, spec_padded, spec_lengths, wav_padded, wav_lengths \ No newline at end of file From 701763884a3d63f984b16f5aa1b542a7b219143f Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Tue, 20 Sep 2022 13:46:08 -0700 Subject: [PATCH 143/244] removed coqui files --- .../tts/losses/vits_coqui_losses.py | 131 -- nemo/collections/tts/models/vits_coqui.py | 515 ----- .../tts/modules/vits_coqui_modules.py | 1671 ----------------- 3 files changed, 2317 deletions(-) delete mode 100644 nemo/collections/tts/losses/vits_coqui_losses.py delete mode 100644 nemo/collections/tts/models/vits_coqui.py delete mode 100644 nemo/collections/tts/modules/vits_coqui_modules.py diff --git a/nemo/collections/tts/losses/vits_coqui_losses.py b/nemo/collections/tts/losses/vits_coqui_losses.py deleted file mode 100644 index ce21566757ce..000000000000 --- a/nemo/collections/tts/losses/vits_coqui_losses.py +++ /dev/null @@ -1,131 +0,0 @@ -import torch -import torch.nn as nn - -class VitsGeneratorLoss(nn.Module): - def __init__(self): - super().__init__() - self.kl_loss_alpha = 45 - self.gen_loss_alpha = 1 - self.feat_loss_alpha = 1 - self.dur_loss_alpha = 1 - self.mel_loss_alpha = 1 - - @staticmethod - def feature_loss(feats_real, feats_generated): - loss 
= 0 - for dr, dg in zip(feats_real, feats_generated): - for rl, gl in zip(dr, dg): - rl = rl.float().detach() - gl = gl.float() - loss += torch.mean(torch.abs(rl - gl)) - return loss * 2 - - @staticmethod - def generator_loss(scores_fake): - loss = 0 - gen_losses = [] - for dg in scores_fake: - dg = dg.float() - l = torch.mean((1 - dg) ** 2) - gen_losses.append(l) - loss += l - - return loss, gen_losses - - @staticmethod - def kl_loss(z_p, logs_q, m_p, logs_p, z_mask): - """ - z_p, logs_q: [b, h, t_t] - m_p, logs_p: [b, h, t_t] - """ - z_p = z_p.float() - logs_q = logs_q.float() - m_p = m_p.float() - logs_p = logs_p.float() - z_mask = z_mask.float() - - kl = logs_p - logs_q - 0.5 - kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p) - kl = torch.sum(kl * z_mask) - l = kl / torch.sum(z_mask) - return l - - - def forward( - self, - mel_slice, - mel_slice_hat, - z_p, - logs_q, - m_p, - logs_p, - z_mask, - scores_disc_fake, - feats_disc_fake, - feats_disc_real, - loss_duration, - ): - """ - Shapes: - - mel_slice : :math:`[B, 1, T]` - - mel_slice_hat: :math:`[B, 1, T]` - - z_p: :math:`[B, C, T]` - - logs_q: :math:`[B, C, T]` - - m_p: :math:`[B, C, T]` - - logs_p: :math:`[B, C, T]` - - z_len: :math:`[B]` - - scores_disc_fake[i]: :math:`[B, C]` - - feats_disc_fake[i][j]: :math:`[B, C, T', P]` - - feats_disc_real[i][j]: :math:`[B, C, T', P]` - """ - loss = 0.0 - return_dict = {} - # compute losses - loss_kl = ( - self.kl_loss(z_p=z_p, logs_q=logs_q, m_p=m_p, logs_p=logs_p, z_mask=z_mask.unsqueeze(1)) - * self.kl_loss_alpha - ) - loss_feat = ( - self.feature_loss(feats_real=feats_disc_real, feats_generated=feats_disc_fake) * self.feat_loss_alpha - ) - loss_gen = self.generator_loss(scores_fake=scores_disc_fake)[0] * self.gen_loss_alpha - loss_mel = torch.nn.functional.l1_loss(mel_slice, mel_slice_hat) * self.mel_loss_alpha - loss_duration = torch.sum(loss_duration.float()) * self.dur_loss_alpha - loss = loss_kl + loss_feat + loss_mel + loss_gen + loss_duration - - # pass losses to the dict - return_dict["loss_gen"] = loss_gen - return_dict["loss_kl"] = loss_kl - return_dict["loss_feat"] = loss_feat - return_dict["loss_mel"] = loss_mel - return_dict["loss_duration"] = loss_duration - return_dict["loss"] = loss - return return_dict - - -class VitsDiscriminatorLoss(nn.Module): - def __init__(self): - super().__init__() - - @staticmethod - def discriminator_loss(scores_real, scores_fake): - loss = 0 - real_losses = [] - fake_losses = [] - for dr, dg in zip(scores_real, scores_fake): - dr = dr.float() - dg = dg.float() - real_loss = torch.mean((1 - dr) ** 2) - fake_loss = torch.mean(dg**2) - loss += real_loss + fake_loss - real_losses.append(real_loss.item()) - fake_losses.append(fake_loss.item()) - return loss, real_losses, fake_losses - - def forward(self, scores_disc_real, scores_disc_fake): - loss = 0.0 - loss_disc, loss_disc_real, loss_disc_fake = self.discriminator_loss( - scores_real=scores_disc_real, scores_fake=scores_disc_fake - ) - - return loss_disc, loss_disc_real, loss_disc_fake \ No newline at end of file diff --git a/nemo/collections/tts/models/vits_coqui.py b/nemo/collections/tts/models/vits_coqui.py deleted file mode 100644 index ea41db34e9af..000000000000 --- a/nemo/collections/tts/models/vits_coqui.py +++ /dev/null @@ -1,515 +0,0 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from nemo.core import typecheck - -# typecheck.set_typecheck_enabled(False) - -import omegaconf -import torch -import wandb -from hydra.utils import instantiate -from omegaconf import DictConfig -from pytorch_lightning import Trainer -from pytorch_lightning.loggers import WandbLogger -from torch.cuda.amp import autocast, GradScaler -from torch.nn import functional as F - -from nemo.collections.tts.helpers.helpers import plot_spectrogram_to_numpy, DistributedBucketSampler -from nemo.collections.tts.losses.vits_losses import ( - KlLoss, - FeatureMatchingLoss, - DiscriminatorLoss, - GeneratorLoss -) - -from nemo.collections.tts.losses.vits_coqui_losses import ( - VitsDiscriminatorLoss, - VitsGeneratorLoss, -) - -from nemo.collections.tts.models.base import TextToWaveform -from nemo.collections.tts.modules.vits_coqui_modules import ( - SynthesizerTrn, - audio_to_mel_torch, - clip_grad_value_, - slice_segments, - spec_to_mel_torch, -) - -from nemo.collections.tts.modules.vits_modules import ( - MultiPeriodDiscriminator, -) - -from nemo.core.classes.common import PretrainedModelInfo -from nemo.core.optim.lr_scheduler import CosineAnnealing -from nemo.utils import logging, model_utils - -class VitsModel(TextToWaveform): - def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): - # Convert to Hydra 1.0 compatible DictConfig - - cfg = model_utils.convert_model_config_to_dict_config(cfg) - cfg = model_utils.maybe_update_config_version(cfg) - - # setup normalizer - self.normalizer = None - self.text_normalizer_call = None - self.text_normalizer_call_kwargs = {} - self._setup_normalizer(cfg) - - # setup tokenizer - self.tokenizer = None - self._setup_tokenizer(cfg) - assert self.tokenizer is not None - - num_tokens = len(self.tokenizer.tokens) - self.tokenizer_pad = self.tokenizer.pad - self.tokenizer_unk = self.tokenizer.oov - - # self.scaler = GradScaler() - - super().__init__(cfg=cfg, trainer=trainer) - - self.audio_to_melspec_precessor = instantiate(cfg.preprocessor, highfreq=cfg.train_ds.dataset.highfreq) - - self.feat_matching_loss = FeatureMatchingLoss() - self.disc_loss = DiscriminatorLoss() - self.gen_loss = GeneratorLoss() - self.kl_loss = KlLoss() - - self.log_train_images = False - self.logged_real_samples = False - self._tb_logger = None - self.hann_window = None - self.sample_rate = cfg.sample_rate - self.hop_size = cfg.n_window_stride - self.n_fft = cfg.train_ds.dataset.n_fft - self.win_length = cfg.train_ds.dataset.win_length - - # TODO: need to add SynthesizerTrn in config - self.net_g = SynthesizerTrn( - n_vocab=num_tokens, - spec_channels=cfg.train_ds.dataset.n_fft // 2 + 1, - segment_size=cfg.segment_size // cfg.train_ds.dataset.hop_length, - inter_channels=cfg.inter_channels, - hidden_channels=cfg.hidden_channels, - filter_channels=cfg.filter_channels, - n_heads=cfg.n_heads, - n_layers=cfg.n_layers, - kernel_size=cfg.pitch_embedding_kernel_size, - p_dropout=cfg.p_dropout, - padding_idx=self.tokenizer_pad, - resblock=cfg.generator.resblock, - resblock_kernel_sizes=cfg.generator.resblock_kernel_sizes, - 
resblock_dilation_sizes=cfg.generator.resblock_dilation_sizes, - upsample_rates=cfg.generator.upsample_rates, - upsample_initial_channel=cfg.generator.upsample_initial_channel, - upsample_kernel_sizes=cfg.generator.upsample_kernel_sizes, - ) - self.net_d = MultiPeriodDiscriminator(cfg.use_spectral_norm) - self.automatic_optimization = False - - window_fn = { - 'hann': torch.hann_window, - 'hamming': torch.hamming_window, - 'blackman': torch.blackman_window, - 'bartlett': torch.bartlett_window, - 'none': None, - }.get(self.hann_window, None) - - self.stft = lambda x: torch.stft( - input=x, - n_fft=self.n_fft, - hop_length=self.hop_size, - win_length=self.win_length, - window=window_fn(self.win_length, periodic=False).to(torch.float) if window_fn else None, - ) - - def _setup_normalizer(self, cfg): - if "text_normalizer" in cfg: - normalizer_kwargs = {} - - if "whitelist" in cfg.text_normalizer: - normalizer_kwargs["whitelist"] = self.register_artifact( - 'text_normalizer.whitelist', cfg.text_normalizer.whitelist - ) - - self.normalizer = instantiate(cfg.text_normalizer, **normalizer_kwargs) - self.text_normalizer_call = self.normalizer.normalize - if "text_normalizer_call_kwargs" in cfg: - self.text_normalizer_call_kwargs = cfg.text_normalizer_call_kwargs - - def _setup_tokenizer(self, cfg): - text_tokenizer_kwargs = {} - if "g2p" in cfg.text_tokenizer and cfg.text_tokenizer.g2p is not None: - g2p_kwargs = {} - - if "phoneme_dict" in cfg.text_tokenizer.g2p: - g2p_kwargs["phoneme_dict"] = self.register_artifact( - 'text_tokenizer.g2p.phoneme_dict', cfg.text_tokenizer.g2p.phoneme_dict, - ) - - if "heteronyms" in cfg.text_tokenizer.g2p: - g2p_kwargs["heteronyms"] = self.register_artifact( - 'text_tokenizer.g2p.heteronyms', cfg.text_tokenizer.g2p.heteronyms, - ) - - text_tokenizer_kwargs["g2p"] = instantiate(cfg.text_tokenizer.g2p, **g2p_kwargs) - - self.tokenizer = instantiate(cfg.text_tokenizer, **text_tokenizer_kwargs) - - def parse(self, str_input: str) -> torch.tensor: - # TODO: Implement - pass - - def configure_optimizers(self): - optim_g = torch.optim.AdamW(self.net_g.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps, weight_decay=0.01) - optim_d = torch.optim.AdamW(self.net_d.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps, weight_decay=0.01) - - scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=self._cfg.lr_decay) - scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=self._cfg.lr_decay) - scheduler_g_dict = { - 'scheduler': scheduler_g, - 'interval': 'step', - } - - scheduler_d_dict = {'scheduler': scheduler_d, 'interval': 'step'} - return [optim_g, optim_d], [scheduler_g_dict, scheduler_d_dict] - - # only for inference - def forward(self, batch, batch_idx, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): - with torch.no_grad(): - (y, y_lengths, x, x_lengths) = batch - # remove else - x = x[:1] - x_lengths = x_lengths[:1] - - y_hat, attn, mask, (z, z_p, m_p, logs_p) = self.net_g.infer(x, x_lengths, sid=sid, noise_scale=noise_scale, - length_scale=length_scale, noise_scale_w=noise_scale_w, max_len=1000) - y_hat_lengths = mask.sum([1, 2]).long() * self._cfg.n_window_stride - return y_hat, y_hat_lengths, (z, z_p, m_p, logs_p) - - def get_spec(self, audio): - with torch.cuda.amp.autocast(enabled=False): - spec = self.stft(audio) - if spec.dtype in [torch.cfloat, torch.cdouble]: - spec = torch.view_as_real(spec) - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-9) - return spec - - def 
_freeze_layers(self): - if self.args.freeze_encoder: - for param in self.text_encoder.parameters(): - param.requires_grad = False - - if self.args.freeze_PE: - for param in self.posterior_encoder.parameters(): - param.requires_grad = False - - if self.args.freeze_DP: - for param in self.duration_predictor.parameters(): - param.requires_grad = False - - if self.args.freeze_flow_decoder: - for param in self.flow.parameters(): - param.requires_grad = False - - if self.args.freeze_waveform_decoder: - for param in self.waveform_decoder.parameters(): - param.requires_grad = False - - def training_step(self, batch, batch_idx): - """Perform a single training step. Run the model forward pass and compute losses. - Args: - batch (Dict): Input tensors. - criterion (nn.Module): Loss layer designed for the model. - optimizer_idx (int): Index of optimizer to use. 0 for the generator and 1 for the discriminator networks. - Returns: - Tuple[Dict, Dict]: Model ouputs and computed losses. - """ - optim_g, optim_d = self.optimizers() - - (waveform, y_lengths, tokens, token_lenghts) = batch - - spec = self.get_spec(waveform) - spec_lens = self.audio_to_melspec_precessor.get_seq_len(y_lengths) - - # self._freeze_layers() - - # Discriminator - # generator pass - outputs = self.net_g( - tokens, - token_lenghts, - spec, - spec_lens, - ) - - # y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = outputs - - y = torch.unsqueeze(waveform, 1) - y = slice_segments(y, outputs["slice_ids"] * self.cfg.n_window_stride, self._cfg.segment_size) - # compute scores and features - - y_d_hat_r, y_d_hat_g, _, _ = self.net_d( - y, outputs["model_outputs"].detach() - ) - - optim_d.zero_grad() - # compute loss - with autocast(enabled=False): # use float32 for the criterion - loss_disc, losses_disc_r, losses_disc_g = self.disc_loss(disc_real_outputs=y_d_hat_r, - disc_generated_outputs=y_d_hat_g) - loss_disc_all = loss_disc - - self.manual_backward(loss_disc_all) - optim_d.step() - - loss_dict = { - "loss_disc_all": loss_disc_all, - } - - for i, v in enumerate(losses_disc_r): - loss_dict[f"loss_disc_r_{i}"] = v - - for i, v in enumerate(losses_disc_g): - loss_dict[f"loss_disc_g_{i}"] = v - - # Generator - - - - mel = spec_to_mel_torch( - spec, - self._cfg.n_window_size, - self._cfg.n_mel_channels, - self._cfg.sample_rate, - self._cfg.mel_fmin, - self._cfg.mel_fmax, - ) - # y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = self.model_outputs_cache - # compute melspec segment - with autocast(enabled=False): - mel_slice = slice_segments(mel, outputs["slice_ids"], self._cfg.segment_size // self.cfg.n_window_stride) - mel_slice_hat = audio_to_mel_torch( - outputs["model_outputs"].float().squeeze(1), - self._cfg.n_window_size, - self._cfg.n_mel_channels, - self._cfg.sample_rate, - self.cfg.n_window_stride, - self._cfg.preprocessor.n_window_size, - self._cfg.mel_fmin, - self._cfg.mel_fmax, - ) - y = torch.unsqueeze(waveform, 1) - y = slice_segments(y, outputs["slice_ids"] * self.cfg.n_window_stride, self._cfg.segment_size) - # compute discriminator scores and features - - y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = self.net_d( - y, outputs["model_outputs"] - ) - - optim_g.zero_grad() - # compute losses - with autocast(enabled=False): # use float32 for the criterion - loss_dur = torch.sum(outputs["loss_duration"].float()) - loss_mel = F.l1_loss(mel_slice, mel_slice_hat) * self._cfg.c_mel - loss_kl = self.kl_loss(z_p=outputs["z_p"], - logs_q=outputs["logs_q"], - 
m_p=outputs["m_p"], - logs_p=outputs["logs_p"], - z_mask=outputs["z_mask"]) * self._cfg.c_kl - loss_fm = self.feat_matching_loss(fmap_r=fmap_r, fmap_g=fmap_g) - loss_gen, losses_gen = self.gen_loss(disc_outputs=y_d_hat_g) - loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl - - - self.manual_backward(loss_gen_all) - optim_g.step() - - loss_dict.update({ - "loss_gen_all": loss_gen_all, - "loss_gen": loss_gen, - "loss_fm": loss_fm, - "loss_mel * c_mel": loss_mel, - "loss_dur": loss_dur, - "loss_kl * c_kl": loss_kl, - } - ) - - for i, v in enumerate(losses_gen): - loss_dict[f"loss_gen_i_{i}"] = v - - self.log_dict(loss_dict, on_step=True, sync_dist=True) - - def _log(self, batch, outputs, name_prefix="train"): # pylint: disable=unused-argument,no-self-use - y_hat, l_length, attn, ids_slice, x_mask, z_mask, _ = outputs - (y, y_lengths, x, x_lengths) = batch - y_hat = y_hat.squeeze() - y_hat_lengths = z_mask.sum([1, 2]).long() * self._cfg.train_ds.dataset.hop_length - mel, mel_lengths = self.audio_to_melspec_precessor(y, y_lengths) - y_hat_mel, y_hat_mel_lengths = self.audio_to_melspec_precessor(y_hat, y_hat_lengths) - logger = self.logger.experiment - # print(logger, self.logger) - if logger is not None and isinstance(self.logger, WandbLogger): - specs = [] - audios = [] - - specs += [ - wandb.Image( - plot_spectrogram_to_numpy(mel[0, :, : mel_lengths[0]].cpu().numpy()), caption=f"val_mel_target", - ), - wandb.Image( - plot_spectrogram_to_numpy(y_hat_mel[0, :, : y_hat_mel_lengths[0]].cpu().numpy()), - caption=name_prefix +"_mel_predicted", - ), - ] - - audios += [ - wandb.Audio( - y[0, : y_lengths[0]].data.cpu().to(torch.float).numpy(), - caption=name_prefix +"_wav_target", - sample_rate=self.sample_rate, - ), - wandb.Audio( - y_hat[0, : y_hat_lengths[0]].data.cpu().to(torch.float).numpy(), - caption=name_prefix +"_wav_predicted", - sample_rate=self.sample_rate, - ), - ] - - logger.log({"specs": specs, "audios": audios}) - - # def train_log( - # self, batch, outputs, logger, assets: dict, steps: int - # ): # pylint: disable=no-self-use - # """Create visualizations and waveform examples. - # For example, here you can plot spectrograms and generate sample sample waveforms from these spectrograms to - # be projected onto Tensorboard. - # Args: - # ap (AudioProcessor): audio processor used at training. - # batch (Dict): Model inputs used at the previous training step. - # outputs (Dict): Model outputs generated at the previoud training step. - # Returns: - # Tuple[Dict, np.ndarray]: training plots and output waveform. 
- # """ - # self._log(batch, outputs, "train") - - def eval_step(self, batch: dict, criterion): - return self.train_step(batch, criterion) - - def validation_step(self, batch, batch_idx): - (y, y_lengths, x, x_lengths) = batch - - # TODO: fix hardcode - y_hat, attn, mask, *_ = self.net_g.infer(x, x_lengths, max_len=1000) - y_hat = y_hat.squeeze() - y_hat_lengths = mask.sum([1, 2]).long() * self._cfg.train_ds.dataset.hop_length - - mel, mel_lengths = self.audio_to_melspec_precessor(y, y_lengths) - y_hat_mel, y_hat_mel_lengths = self.audio_to_melspec_precessor(y_hat, y_hat_lengths) - - # plot audio once per epoch - if batch_idx == 0: - logger = self.logger.experiment - # print(logger, self.logger) - if logger is not None and isinstance(self.logger, WandbLogger): - specs = [] - audios = [] - - specs += [ - wandb.Image( - plot_spectrogram_to_numpy(mel[0, :, : mel_lengths[0]].cpu().numpy()), caption=f"val_mel_target", - ), - wandb.Image( - plot_spectrogram_to_numpy(y_hat_mel[0, :, : y_hat_mel_lengths[0]].cpu().numpy()), - caption=f"val_mel_predicted", - ), - ] - - audios += [ - wandb.Audio( - y[0, : y_lengths[0]].data.cpu().to(torch.float).numpy(), - caption=f"val_wav_target", - sample_rate=self.sample_rate, - ), - wandb.Audio( - y_hat[0, : y_hat_lengths[0]].data.cpu().to(torch.float).numpy(), - caption=f"val_wav_predicted", - sample_rate=self.sample_rate, - ), - ] - - logger.log({"specs": specs, "audios": audios}) - - # def eval_log(self, batch: dict, outputs: dict, logger, assets: dict, steps: int) -> None: - # self._log(batch, outputs, "eval") - - def _loader(self, cfg): - try: - # _ = cfg.model.train_ds.manifest_filepath - _ = cfg['dataset']['manifest_filepath'] - except omegaconf.errors.MissingMandatoryValue: - logging.warning("manifest_filepath was skipped. No dataset for this model.") - return None - - dataset = instantiate( - cfg.dataset, - text_normalizer=self.normalizer, - text_normalizer_call_kwargs=self.text_normalizer_call_kwargs, - text_tokenizer=self.tokenizer, - ) - return torch.utils.data.DataLoader( # noqa - dataset=dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params, - ) - - def train_dataloader(self): - # default used by the Trainer - dataset = instantiate( - self.cfg.train_ds.dataset, - text_normalizer=self.normalizer, - text_normalizer_call_kwargs=self.text_normalizer_call_kwargs, - text_tokenizer=self.tokenizer, - ) - - train_sampler = DistributedBucketSampler( - dataset, - self.cfg.train_ds.batch_sampler.batch_size, - [32,300,400,500,600,700,800,900,1000], - shuffle=True) - dataloader = torch.utils.data.DataLoader(dataset, collate_fn=dataset.collate_fn, batch_sampler=train_sampler, - **self.cfg.train_ds.dataloader_params,) - print('made ddp loader') - return dataloader - - def setup_training_data(self, cfg): - self._train_dl = self._loader(cfg) - - def setup_validation_data(self, cfg): - self._validation_dl = self._loader(cfg) - - def setup_test_data(self, cfg): - """Omitted.""" - pass - - @classmethod - def list_available_models(cls) -> 'List[PretrainedModelInfo]': - list_of_models = [] - # TODO: List available models?? - return list_of_models - - def convert_text_to_waveform(self, *, tokens): - # TODO: Convert text to waveforms - pass diff --git a/nemo/collections/tts/modules/vits_coqui_modules.py b/nemo/collections/tts/modules/vits_coqui_modules.py deleted file mode 100644 index 533115ce912f..000000000000 --- a/nemo/collections/tts/modules/vits_coqui_modules.py +++ /dev/null @@ -1,1671 +0,0 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# MIT License -# -# Copyright (c) 2021 Jaehyeon Kim -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -import math - -import numpy as np -import torch -from torch import nn -from torch.nn import Conv1d, ConvTranspose1d, Conv2d -from torch.nn import functional as F -from librosa.filters import mel as librosa_mel_fn -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm - -from nemo.collections.tts.modules.monotonic_align import maximum_path - -# TODO: need to do LARGE refactoring - - -LRELU_SLOPE = 0.1 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." 
- - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) - for _ in range(n_layers - 1): - self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size ** i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, - groups=channels, dilation=dilation, padding=padding - )) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): - super(WN, self).__init__() - assert (kernel_size % 2 == 1) - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size, - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = 
self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:, cond_offset:cond_offset + 2 * self.hidden_channels, :] - else: - g_l = torch.zeros_like(x_in) - - acts = fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:, :self.hidden_channels, :] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:, self.hidden_channels:, :] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - -# TODO: reuse from hifigan if it is possible? -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - -# TODO: reuse from hifigan if it is possible? 
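# NOTE (editorial, not part of the original patch): a minimal smoke-test sketch
# for ResBlock1 above. The channel count and input shape are assumptions; it is
# wrapped in a function so it only runs once the module (including the
# get_padding/init_weights helpers defined further below) is fully loaded.
def _resblock1_smoke_test():
    block = ResBlock1(channels=64, kernel_size=3, dilation=(1, 3, 5))
    x = torch.randn(2, 64, 100)          # [batch, channels, time]
    assert block(x).shape == x.shape     # dilated stacks are length-preserving
    block.remove_weight_norm()           # strip reparametrization for inference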
-class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels, 1)) - self.logs = nn.Parameter(torch.zeros(channels, 1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1, 2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, - gin_channels=gin_channels) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels] * 2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1, 2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - -class ConvFlow(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - 
self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) - self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, c * (num_bins * 3 - 1), t] -> [b, c, t, num_bins * 3 - 1] - - unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins:2 * self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_derivatives = h[..., 2 * self.num_bins:] - - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1, 2]) - if not reverse: - return x, logdet - else: - return x - - -class StochasticDurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): - super().__init__() - filter_channels = in_channels # this needs to be removed in a future version. - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = Log() - self.flows = nn.ModuleList() - self.flows.append(ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - # torch.manual_seed(1) - # torch.cuda.manual_seed(1) - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2]) - logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q - - logdet_tot = 0 - z0,
logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - for flow in flows: - z = flow(z, x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class DurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_1 = LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_2 = LayerNorm(filter_channels) - self.proj = nn.Conv1d(filter_channels, 1, 1) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, in_channels, 1) - - def forward(self, x, x_mask, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - x = self.proj(x * x_mask) - return x * x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - padding_idx): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - - self.emb = nn.Embedding(n_vocab, hidden_channels, padding_idx=padding_idx) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) - - self.encoder = Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths): - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append(ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) 
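            # NOTE (editorial, not part of the original patch): the Flip
            # appended next swaps the two channel halves, so alternating
            # mean-only couplings transform both halves of the latent.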
- self.flows.append(Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype).to(device=x.device) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - # torch.manual_seed(1) - # torch.cuda.manual_seed(1) - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - -# TODO: reuse from hifigan if it is possible? -class Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = ResBlock1 if resblock == '1' else ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), - k, u, padding=(k-u)//2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel//(2**(i+1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i*self.num_kernels+j](x) - else: - xs += self.resblocks[i*self.num_kernels+j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - -# TODO: reuse from hifigan if it is possible? 
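# NOTE (editorial, not part of the original patch): a hedged shape check for
# the Generator above. The hyperparameters are assumptions that mirror a
# common 22.05 kHz VITS/HiFi-GAN configuration, not values taken from this
# patch.
def _generator_shape_check():
    gen = Generator(
        initial_channel=192,
        resblock='1',
        resblock_kernel_sizes=[3, 7, 11],
        resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
        upsample_rates=[8, 8, 2, 2],
        upsample_initial_channel=512,
        upsample_kernel_sizes=[16, 16, 4, 4],
    )
    z = torch.randn(1, 192, 50)             # 50 latent frames
    audio = gen(z)                          # upsampled by 8 * 8 * 2 * 2 = 256
    assert audio.shape == (1, 1, 50 * 256)  # one waveform sample per hop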
-class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.dropout = nn.Dropout(0.3) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - -# TODO: reuse from hifigan if it is possible? -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.dropout = nn.Dropout(0.3) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - -# TODO: reuse from hifigan if it is possible? 
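# NOTE (editorial, not part of the original patch): a hedged illustration of
# DiscriminatorP's 1d-to-2d fold above; the period and input size are
# assumptions.
def _period_discriminator_demo():
    disc = DiscriminatorP(period=3)
    signal = torch.randn(4, 1, 1000)  # 1000 % 3 != 0, so forward() reflect-pads
    score, fmaps = disc(signal)       # flattened logits + per-layer feature maps
    assert len(fmaps) == 6            # five strided convs + conv_post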
-class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - padding_idx, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.padding_idx = padding_idx - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - - self.use_sdp = use_sdp - - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - padding_idx) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - if use_sdp: - self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - else: - self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) - - if n_speakers > 1: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - def forward(self, x, x_lengths, y, y_lengths, sid=None): - - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - - with torch.no_grad(): - # negative cross-entropy - s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] - neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] - neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = torch.sum(-0.5 * (m_p 
** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] - neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() - - w = attn.sum(2) - if self.use_sdp: - l_length = self.dp(x, x_mask, w, g=g) - l_length = l_length / torch.sum(x_mask) - else: - logw_ = torch.log(w + 1e-6) * x_mask - logw = self.dp(x, x_mask, g=g) - l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging - - # expand prior - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_slice, ids_slice = rand_slice_segments(z, y_lengths, self.segment_size) - o = self.dec(z_slice, g=g) - - outputs = { - "model_outputs": o, - "alignments": attn.squeeze(1), - "loss_duration": l_length, - "z_mask": y_mask, - "m_p": m_p, - "logs_p": logs_p, - "z": z, - "z_p": z_p, - "m_q": m_q, - "logs_q": logs_q, - "slice_ids": ids_slice, - } - - return outputs - - def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - if self.use_sdp: - logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) - else: - logw = self.dp(x, x_mask, g=g) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:,:,:max_len], g=g) - return o, attn, y_mask, (z, z_p, m_p, logs_p) - - # TODO: do we really need it? Can be used for emotion conversion - def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): - assert self.n_speakers > 0, "n_speakers has to be larger than 0."
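        # NOTE (editorial, not part of the original patch): the conversion path
        # below encodes audio with the source speaker embedding, maps it into
        # the speaker-independent prior space via the forward flow, then
        # inverts the flow and vocodes with the target speaker embedding.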
- g_src = self.emb_g(sid_src).unsqueeze(-1) - g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) - z_p = self.flow(z, y_mask, g=g_src) - z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) - o_hat = self.dec(z_hat * y_mask, g=g_tgt) - return o_hat, y_mask, (z, z_p, z_hat) - -################## -# Mel_processing # -################## - -mel_basis = {} -hann_window = {} - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global mel_basis - dtype_device = str(spec.dtype) + '_' + str(spec.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def audio_to_mel_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - - return spec - - -########### -# Commons # -########### - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. 
* logs_q) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str_max = ids_str_max.to(device=x.device) - # torch.manual_seed(1) - # torch.cuda.manual_seed(1) - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - -# TODO: reuse from helpers get_mask_from_lengths? 
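# NOTE (editorial, not part of the original patch): hedged mini-examples for
# two of the helpers above; shapes and lengths are assumptions.
def _commons_examples():
    # F.pad expects pad widths for the last dimension first, flattened:
    assert convert_pad_shape([[0, 0], [0, 0], [1, 0]]) == [1, 0, 0, 0, 0, 0]
    # rand_slice_segments crops a random fixed-size window per batch element:
    seg, ids = rand_slice_segments(torch.randn(1, 2, 12), torch.tensor([12]), segment_size=4)
    assert seg.shape == (1, 2, 4)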
-def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1. / norm_type) - return total_norm - - -############## -# Attentions # -############## -class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, 
p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = torch.tril(torch.ones(x_mask.size(2), x_mask.size(2))).unsqueeze(0).unsqueeze(0).to(device=x.device, dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - self.conv_q = nn.Conv1d(channels, channels, 1) - self.conv_k = nn.Conv1d(channels, channels, 1) - self.conv_v = nn.Conv1d(channels, channels, 1) - self.conv_o = nn.Conv1d(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) - - nn.init.xavier_uniform_(self.conv_q.weight) - nn.init.xavier_uniform_(self.conv_k.weight) - nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with torch.no_grad(): - self.conv_k.weight.copy_(self.conv_q.weight) - self.conv_k.bias.copy_(self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = key.size(0), key.size(1), key.size(2), query.size(2) - query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) - key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) - - scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." 
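            # NOTE (editorial, not part of the original patch): this branch adds
            # learned relative-position scores: embeddings for offsets within
            # [-window_size, window_size] are matched against the queries and
            # shifted from relative to absolute indexing before being added.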
- key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) - if mask is not None: - scores = scores.masked_fill(mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." - block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) - scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = torch.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = torch.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padded_relative_embeddings = F.pad( - relative_embeddings, - convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.size() - # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) - - # Concat extra elements so to add up to shape (len+1, 2*len-1). - x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, convert_pad_shape([[0,0],[0,0],[0,length-1]])) - - # Reshape and slice out the padded elements. 
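        # NOTE (editorial, not part of the original patch): after the two pads,
        # viewing the buffer as [length + 1, 2 * length - 1] staggers each row
        # by one position, so slicing [:length, length - 1:] reads off the
        # scores at absolute positions.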
- x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.size() - # pad along column - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length - 1)]) - # add 0's at the beginning that will skew the elements after the reshape - x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. - Returns: - a Tensor with shape [1, 1, length, length] - """ - r = torch.arange(length, dtype=torch.float32) - diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) - return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) - - -class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = self.conv_1(self.padding(x * x_mask)) - if self.activation == "gelu": - x = x * torch.sigmoid(1.702 * x) - else: - x = torch.relu(x) - x = self.drop(x) - x = self.conv_2(self.padding(x * x_mask)) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, convert_pad_shape(padding)) - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - x = F.pad(x, convert_pad_shape(padding)) - return x - - -############## -# Transforms # -############## - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return
torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = 
derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet From abc1b28f0a92c65b57fc7b9898bc429967c24df0 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Fri, 14 Oct 2022 05:33:26 -0700 Subject: [PATCH 144/244] 'Fixed tokenizer' --- nemo/collections/tts/torch/tts_tokenizers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nemo/collections/tts/torch/tts_tokenizers.py b/nemo/collections/tts/torch/tts_tokenizers.py index a5c15e0b2c4d..520437ce99e9 100644 --- a/nemo/collections/tts/torch/tts_tokenizers.py +++ b/nemo/collections/tts/torch/tts_tokenizers.py @@ -51,7 +51,7 @@ def __init__(self, tokens, *, pad=PAD, blank=BLANK, oov=OOV, sep='', add_blank_a # use add_blank_at=None only for ASR where blank is added automatically, disable blank here self.blank = None - self.oov, tokens = len(tokens), tokens + [oov] # Out Of Vocabulary + self.oov, tokens = len(tokens), tokens# + [oov] # Out Of Vocabulary if add_blank_at == "last": tokens[-1], tokens[-2] = tokens[-2], tokens[-1] @@ -377,7 +377,7 @@ class IPAPhonemesTokenizer(BaseTokenizer): PAD = '_' # fmt: on - _punctuation = ';:,.!?¡¿—…"«»“” ' + _punctuation = ';:,.!?¡¿—…"«»“”' _letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' _letters_ipa = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ" From ec5a6683c7d955ea690d7bbd722b258cdc999170 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Fri, 14 Oct 2022 05:34:06 -0700 Subject: [PATCH 145/244] Fix tokenizer --- nemo/collections/tts/torch/data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nemo/collections/tts/torch/data.py b/nemo/collections/tts/torch/data.py index 4f5f1444cfc9..417caeab33e1 100644 
--- a/nemo/collections/tts/torch/data.py +++ b/nemo/collections/tts/torch/data.py @@ -1040,7 +1040,7 @@ def __init__( item = json.loads(line) file_info = { - "audio_filepath": item["audio_filepath"], + "audio_filepath": "../" + item["audio_filepath"], "original_text": item["text"], "mel_filepath": item["mel_filepath"] if "mel_filepath" in item else None, "duration": item["duration"] if "duration" in item else None, From ae529c512567552dd50d409d6a7e74240db7d40c Mon Sep 17 00:00:00 2001 From: ericharper Date: Mon, 24 Oct 2022 23:26:43 -0600 Subject: [PATCH 146/244] update branch Signed-off-by: ericharper --- Jenkinsfile | 296 +++++++++--------- nemo/package_info.py | 2 +- requirements/requirements_lightning.txt | 2 +- tutorials/00_NeMo_Primer.ipynb | 2 +- tutorials/01_NeMo_Models.ipynb | 2 +- tutorials/02_NeMo_Adapters.ipynb | 2 +- tutorials/AudioTranslationSample.ipynb | 2 +- ...blish_NeMo_Model_On_Hugging_Face_Hub.ipynb | 2 +- tutorials/VoiceSwapSample.ipynb | 2 +- .../asr/ASR_CTC_Language_Finetuning.ipynb | 2 +- tutorials/asr/ASR_for_telephony_speech.ipynb | 2 +- tutorials/asr/ASR_with_NeMo.ipynb | 4 +- .../asr/ASR_with_Subword_Tokenization.ipynb | 2 +- tutorials/asr/ASR_with_Transducers.ipynb | 2 +- .../asr/Buffered_Transducer_Inference.ipynb | 2 +- ..._Transducer_Inference_with_LCS_Merge.ipynb | 2 +- tutorials/asr/Intro_to_Transducers.ipynb | 2 +- tutorials/asr/Multilang_ASR.ipynb | 2 +- tutorials/asr/Offline_ASR.ipynb | 2 +- .../Offline_ASR_with_VAD_for_CTC_models.ipynb | 2 +- .../asr/Online_ASR_Microphone_Demo.ipynb | 2 +- tutorials/asr/Online_Noise_Augmentation.ipynb | 2 +- .../Online_Offline_Microphone_VAD_Demo.ipynb | 2 +- .../Online_Offline_Speech_Commands_Demo.ipynb | 2 +- .../asr/Self_Supervised_Pre_Training.ipynb | 2 +- tutorials/asr/Speech_Commands.ipynb | 2 +- tutorials/asr/Streaming_ASR.ipynb | 2 +- tutorials/asr/Voice_Activity_Detection.ipynb | 2 +- .../asr/asr_adapters/ASR_with_Adapters.ipynb | 2 +- ...Language_Models_for_Downstream_Tasks.ipynb | 2 +- tutorials/nlp/02_NLP_Tokenizers.ipynb | 4 +- ...a_Preprocessing_and_Cleaning_for_NMT.ipynb | 2 +- tutorials/nlp/Dialogue.ipynb | 2 +- tutorials/nlp/Entity_Linking_Medical.ipynb | 2 +- tutorials/nlp/GLUE_Benchmark.ipynb | 2 +- ...Joint_Intent_and_Slot_Classification.ipynb | 2 +- tutorials/nlp/MegatronBert_export.ipynb | 2 +- ...on_Synthetic_Tabular_Data_Generation.ipynb | 2 +- .../nlp/Multitask_Prompt_and_PTuning.ipynb | 2 +- .../Non_English_Downstream_Tasks_(NER).ipynb | 2 +- .../nlp/Punctuation_and_Capitalization.ipynb | 2 +- ...ion_and_Capitalization_Lexical_Audio.ipynb | 2 +- tutorials/nlp/Question_Answering.ipynb | 2 +- .../nlp/Relation_Extraction-BioMegatron.ipynb | 2 +- tutorials/nlp/Text2Sparql.ipynb | 4 +- ...xt_Classification_Sentiment_Analysis.ipynb | 2 +- .../Token_Classification-BioMegatron.ipynb | 2 +- ...ssification_Named_Entity_Recognition.ipynb | 4 +- .../nlp/Zero_Shot_Intent_Recognition.ipynb | 2 +- .../ASR_with_SpeakerDiarization.ipynb | 2 +- .../Speaker_Diarization_Inference.ipynb | 2 +- .../Speaker_Diarization_Training.ipynb | 2 +- .../Speaker_Identification_Verification.ipynb | 2 +- .../ITN_with_Thutmose_Tagger.ipynb | 2 +- .../Text_(Inverse)_Normalization.ipynb | 2 +- tutorials/text_processing/WFST_Tutorial.ipynb | 2 +- .../tools/CTC_Segmentation_Tutorial.ipynb | 2 +- tutorials/tools/Multispeaker_Simulator.ipynb | 2 +- .../tts/Aligner_Inference_Examples.ipynb | 2 +- tutorials/tts/FastPitch_Finetuning.ipynb | 2 +- .../tts/FastPitch_GermanTTS_Training.ipynb | 2 +- .../tts/FastPitch_MixerTTS_Training.ipynb 
| 2 +- .../tts/FastPitch_Speaker_Interpolation.ipynb | 2 +- .../tts/Inference_DurationPitchControl.ipynb | 2 +- tutorials/tts/Inference_ModelSelect.ipynb | 2 +- tutorials/tts/NeMo_TTS_Primer.ipynb | 2 +- tutorials/tts/Tacotron2_Training.ipynb | 2 +- 67 files changed, 218 insertions(+), 218 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 6d4b672e64f9..31364fe3ea4b 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -56,8 +56,8 @@ pipeline { // stage('Torch TTS unit tests') { // when { // anyOf { - // branch 'main' - // changeRequest target: 'main' + // branch 'r1.13.0' + // changeRequest target: 'r1.13.0' // } // } // steps { @@ -112,8 +112,8 @@ pipeline { stage('L0: Unit Tests CPU') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } steps { @@ -124,8 +124,8 @@ pipeline { stage('L0: Unit Tests Speech Dataset Processor') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } steps { @@ -137,8 +137,8 @@ pipeline { stage('L0: TN/ITN Tests CPU') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -170,8 +170,8 @@ pipeline { stage('L2: NeMo text processing') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -222,8 +222,8 @@ pipeline { stage('L2: ASR dev run') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -322,8 +322,8 @@ pipeline { stage('L2: ASR dev run - part two') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -353,8 +353,8 @@ pipeline { stage('L2: Speaker dev run') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -475,8 +475,8 @@ pipeline { // stage('L2: ASR DALI dev run') { // when { // anyOf { - // branch 'main' - // changeRequest target: 'main' + // branch 'r1.13.0' + // changeRequest target: 'r1.13.0' // } // } // failFast true @@ -543,8 +543,8 @@ pipeline { // stage('L2: ASR RNNT dev run') { // when { // anyOf { - // branch 'main' - // changeRequest target: 'main' + // branch 'r1.13.0' + // changeRequest target: 'r1.13.0' // } // } // failFast true @@ -587,8 +587,8 @@ pipeline { stage('L2: ASR Multi-dataloader dev run') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -635,8 +635,8 @@ pipeline { stage('L2: ASR Adapters') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -663,8 +663,8 @@ pipeline { stage('L2: Megatron T5 Adapter PP=2') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -707,8 +707,8 @@ pipeline { stage('L2: Megatron T5 Adapter TP=2') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -749,8 +749,8 @@ pipeline { stage('L2: Megatron T5 IA3 PP=2') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -793,8 +793,8 @@ pipeline { stage('L2: Megatron T5 IA3 TP=2') { when { 
anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -835,8 +835,8 @@ pipeline { stage('L2: Megatron GPT Adapter TP=2') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -876,8 +876,8 @@ pipeline { stage('L2: Megatron GPT Adapter PP=2') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -918,8 +918,8 @@ pipeline { stage('L2: Speech Transcription') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -940,8 +940,8 @@ pipeline { stage('L2: Segmentation Tool') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } stages { @@ -996,8 +996,8 @@ pipeline { stage('L2: G2P Models') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -1077,8 +1077,8 @@ pipeline { // stage('L2: Multi-GPU Megatron finetuning') { // when { // anyOf { - // branch 'main' - // changeRequest target: 'main' + // branch 'r1.13.0' + // changeRequest target: 'r1.13.0' // } // } // failFast true @@ -1104,8 +1104,8 @@ pipeline { stage('L2: STS-b') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -1164,8 +1164,8 @@ pipeline { stage('L2: Dialogue Classification') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -1335,8 +1335,8 @@ pipeline { stage('L2: Dialogue Generation') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -1401,8 +1401,8 @@ pipeline { // stage('L2: Dialogue Generation Part 2') { // when { // anyOf { -// branch 'main' -// changeRequest target: 'main' +// branch 'r1.13.0' +// changeRequest target: 'r1.13.0' // } // } // failFast true @@ -1431,8 +1431,8 @@ pipeline { stage('L2: COPY') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -1461,8 +1461,8 @@ pipeline { stage('L2: Duplex Text Normalization') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -1499,8 +1499,8 @@ pipeline { // stage('L2: MegaBERT Token Classification') { // when { // anyOf { - // branch 'main' - // changeRequest target: 'main' + // branch 'r1.13.0' + // changeRequest target: 'r1.13.0' // } // } // failFast true @@ -1525,8 +1525,8 @@ pipeline { stage('L2: BERT Text Classification') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -1554,8 +1554,8 @@ pipeline { stage('L2: Parallel BERT Question-Answering SQUAD v1.1 & v2.0') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -1613,8 +1613,8 @@ pipeline { stage('L2: Parallel BART Question-Answering SQUAD v1.1 & v2.0') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -1674,8 +1674,8 @@ pipeline { stage('L2: Parallel GPT2 
Question-Answering SQUAD v1.1 & v2.0') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -1735,8 +1735,8 @@ pipeline { stage('L2: Intent and Slot Classification Tasks') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -1775,8 +1775,8 @@ pipeline { // stage('L2: Model Parallel Size 2 Megatron Text Classification') { // when { // anyOf{ - // branch 'main' - // changeRequest target: 'main' + // branch 'r1.13.0' + // changeRequest target: 'r1.13.0' // } // } // failFast true @@ -1804,8 +1804,8 @@ pipeline { // stage('L2: Model Parallel Size 2 Megatron Autoresume') { // when { // anyOf{ - // branch 'main' - // changeRequest target: 'main' + // branch 'r1.13.0' + // changeRequest target: 'r1.13.0' // } // } // failFast true @@ -1835,8 +1835,8 @@ pipeline { // stage('L2: Model Parallel Size 2 Megatron Evaluation from .nemo') { // when { // anyOf{ - // branch 'main' - // changeRequest target: 'main' + // branch 'r1.13.0' + // changeRequest target: 'r1.13.0' // } // } // failFast true @@ -1856,8 +1856,8 @@ pipeline { // stage('L2: Model Parallel Size 2 Megatron Train from .nemo') { // when { // anyOf{ - // branch 'main' - // changeRequest target: 'main' + // branch 'r1.13.0' + // changeRequest target: 'r1.13.0' // } // } // failFast true @@ -1879,8 +1879,8 @@ pipeline { stage('L2: Parallel NLP Examples 2') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -2004,8 +2004,8 @@ pipeline { stage('Punctuation & Capitalization tarred dataset') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -2063,8 +2063,8 @@ pipeline { stage('Punctuation & Capitalization, Different ways of passing labels to model') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -2171,8 +2171,8 @@ pipeline { stage('Punctuation & Capitalization inference') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -2197,8 +2197,8 @@ pipeline { stage('L2: Parallel Pretraining BERT pretraining from Text/Preprocessed') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -2259,8 +2259,8 @@ pipeline { stage('L2: Entity Linking') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -2287,8 +2287,8 @@ pipeline { stage('L2: NMT Attention is All You Need Training') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -2410,8 +2410,8 @@ pipeline { stage('L2: NMT Attention is All You Need Inference') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -2446,8 +2446,8 @@ pipeline { stage('L2: NMT Attention is All You Need Finetuning') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -2480,8 +2480,8 @@ pipeline { stage('L2: NMT with HuggingFace') { when { anyOf { - branch 'main' - changeRequest target: 'main' + 
branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -2557,8 +2557,8 @@ pipeline { stage('L2: NMT Tarred Dataset Creation') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -2611,8 +2611,8 @@ pipeline { stage('L2: Megatron NMT Training TP=2') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -2706,8 +2706,8 @@ pipeline { // stage('L2: NMT Bottleneck Fallback') { // when { // anyOf { - // branch 'main' - // changeRequest target: 'main' + // branch 'r1.13.0' + // changeRequest target: 'r1.13.0' // } // } // failFast true @@ -2753,8 +2753,8 @@ pipeline { // stage('L2: NMT Bottleneck Architecture') { // when { // anyOf { - // branch 'main' - // changeRequest target: 'main' + // branch 'r1.13.0' + // changeRequest target: 'r1.13.0' // } // } // failFast true @@ -2836,8 +2836,8 @@ pipeline { // stage('L2: NMT Bottleneck LVM') { // when { // anyOf { - // branch 'main' - // changeRequest target: 'main' + // branch 'r1.13.0' + // changeRequest target: 'r1.13.0' // } // } // failFast true @@ -2919,8 +2919,8 @@ pipeline { stage('L2: Megatron Bert Pretraining and Resume Training') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -2989,8 +2989,8 @@ pipeline { stage('L2: Megatron RETRO Pretraining and Resume Training') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -3061,8 +3061,8 @@ pipeline { stage('L2: Megatron RETRO muTransfer Pretraining Performance') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -3144,8 +3144,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: BioMegatron Bert NER Task') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -3162,8 +3162,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron GPT Pretraining and Resume Training TP=2') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -3234,8 +3234,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron GPT Pretraining and Resume Training PP=2') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -3306,8 +3306,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron GPT Eval') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -3323,8 +3323,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron GPT Eval PP2') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -3341,8 +3341,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron GPT Prompt Learning') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -3414,8 +3414,8 @@ 
assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' // stage('L2: Megatron GPT Convert from Megatron-LM checkpoing and Eval') { // when { // anyOf { - // branch 'main' - // changeRequest target: 'main' + // branch 'r1.13.0' + // changeRequest target: 'r1.13.0' // } // } // failFast true @@ -3441,8 +3441,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron Change Partitions') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -3480,8 +3480,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron T5 Pretraining and Resume Training TP=2') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -3576,8 +3576,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron T5 Pretraining and Resume Training PP=2') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -3646,8 +3646,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron T5 Prompt Learning') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -3745,8 +3745,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron UL2 Pretraining and Resume Training TP=2') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -3825,8 +3825,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron T5 Eval') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -3842,8 +3842,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron BART Pretraining and Resume Training, TP=2') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -3911,8 +3911,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron BART Pretraining and Resume Training, PP=2') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -3984,8 +3984,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron T5 GLUE/XNLI Finetuning') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true @@ -4057,8 +4057,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: TTS Fast dev runs 1') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } parallel { @@ -4168,8 +4168,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L??: Speech Checkpoints tests') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'r1.13.0' + changeRequest target: 'r1.13.0' } } failFast true diff --git a/nemo/package_info.py b/nemo/package_info.py index a908ba0e31ec..0fb58a56fe24 100644 --- a/nemo/package_info.py +++ b/nemo/package_info.py @@ -16,7 +16,7 @@ MAJOR = 1 MINOR = 13 PATCH 
= 0 -PRE_RELEASE = 'rc0' +PRE_RELEASE = '' # Use the following formatting: (major, minor, patch, pre-release) VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE) diff --git a/requirements/requirements_lightning.txt b/requirements/requirements_lightning.txt index ea1ab05b78fa..259bd1289dc7 100644 --- a/requirements/requirements_lightning.txt +++ b/requirements/requirements_lightning.txt @@ -1,4 +1,4 @@ -pytorch-lightning>=1.7.0 +pytorch-lightning>=1.7.0,<=1.7.7 torchmetrics>=0.4.1rc0 transformers>=4.0.1,<=4.21.2 webdataset>=0.1.48,<=0.1.62 diff --git a/tutorials/00_NeMo_Primer.ipynb b/tutorials/00_NeMo_Primer.ipynb index 5e5dcbb92c1e..aac1ee3b72c6 100644 --- a/tutorials/00_NeMo_Primer.ipynb +++ b/tutorials/00_NeMo_Primer.ipynb @@ -42,7 +42,7 @@ "!pip install text-unidecode\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "## Install TorchAudio\n", diff --git a/tutorials/01_NeMo_Models.ipynb b/tutorials/01_NeMo_Models.ipynb index 6f230e62c1a3..c537f2c86855 100644 --- a/tutorials/01_NeMo_Models.ipynb +++ b/tutorials/01_NeMo_Models.ipynb @@ -37,7 +37,7 @@ "!pip install text-unidecode\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "## Install TorchAudio\n", diff --git a/tutorials/02_NeMo_Adapters.ipynb b/tutorials/02_NeMo_Adapters.ipynb index 75942c6bf4af..c7c6bd32137e 100644 --- a/tutorials/02_NeMo_Adapters.ipynb +++ b/tutorials/02_NeMo_Adapters.ipynb @@ -25,7 +25,7 @@ "!pip install text-unidecode\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "## Grab the config we'll use in this example\n", diff --git a/tutorials/AudioTranslationSample.ipynb b/tutorials/AudioTranslationSample.ipynb index c4fec16c4181..f0ab7df20199 100644 --- a/tutorials/AudioTranslationSample.ipynb +++ b/tutorials/AudioTranslationSample.ipynb @@ -38,7 +38,7 @@ }, "outputs": [], "source": [ - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "# install Pynini for text normalization\n", diff --git a/tutorials/Publish_NeMo_Model_On_Hugging_Face_Hub.ipynb b/tutorials/Publish_NeMo_Model_On_Hugging_Face_Hub.ipynb index 3c184b5cbf78..1ecb17e83b06 100644 --- a/tutorials/Publish_NeMo_Model_On_Hugging_Face_Hub.ipynb +++ b/tutorials/Publish_NeMo_Model_On_Hugging_Face_Hub.ipynb @@ -41,7 +41,7 @@ "!pip install text-unidecode\n", "\n", "### Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]" ] }, diff --git a/tutorials/VoiceSwapSample.ipynb b/tutorials/VoiceSwapSample.ipynb index 016737f26a9f..7c895e4e6681 100644 --- a/tutorials/VoiceSwapSample.ipynb +++ b/tutorials/VoiceSwapSample.ipynb @@ -39,7 +39,7 @@ }, "outputs": [], "source": [ - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "# install Pynini for text normalization\n", diff --git a/tutorials/asr/ASR_CTC_Language_Finetuning.ipynb b/tutorials/asr/ASR_CTC_Language_Finetuning.ipynb index aad696e667b9..27b229af8a4c 100644 --- a/tutorials/asr/ASR_CTC_Language_Finetuning.ipynb +++ 
b/tutorials/asr/ASR_CTC_Language_Finetuning.ipynb @@ -39,7 +39,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "\"\"\"\n", diff --git a/tutorials/asr/ASR_for_telephony_speech.ipynb b/tutorials/asr/ASR_for_telephony_speech.ipynb index 5be3b50502b3..787b448620f7 100644 --- a/tutorials/asr/ASR_for_telephony_speech.ipynb +++ b/tutorials/asr/ASR_for_telephony_speech.ipynb @@ -27,7 +27,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "## Grab the config we'll use in this example\n", diff --git a/tutorials/asr/ASR_with_NeMo.ipynb b/tutorials/asr/ASR_with_NeMo.ipynb index 519456a012af..9b86fab7e900 100644 --- a/tutorials/asr/ASR_with_NeMo.ipynb +++ b/tutorials/asr/ASR_with_NeMo.ipynb @@ -53,7 +53,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "\"\"\"\n", @@ -587,7 +587,7 @@ "\n", "if not os.path.exists(config_path):\n", " # Grab the config we'll use in this example\n", - " BRANCH = 'main'\n", + " BRANCH = 'r1.13.0'\n", " !mkdir configs\n", " !wget -P configs/ https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/asr/conf/config.yaml\n", "\n", diff --git a/tutorials/asr/ASR_with_Subword_Tokenization.ipynb b/tutorials/asr/ASR_with_Subword_Tokenization.ipynb index 50e4f4536908..224984b64cca 100644 --- a/tutorials/asr/ASR_with_Subword_Tokenization.ipynb +++ b/tutorials/asr/ASR_with_Subword_Tokenization.ipynb @@ -40,7 +40,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "## Grab the config we'll use in this example\n", diff --git a/tutorials/asr/ASR_with_Transducers.ipynb b/tutorials/asr/ASR_with_Transducers.ipynb index f0efdf1cb363..b59153517558 100644 --- a/tutorials/asr/ASR_with_Transducers.ipynb +++ b/tutorials/asr/ASR_with_Transducers.ipynb @@ -28,7 +28,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "## Grab the config we'll use in this example\n", diff --git a/tutorials/asr/Buffered_Transducer_Inference.ipynb b/tutorials/asr/Buffered_Transducer_Inference.ipynb index 2d42749524d9..939355de6368 100644 --- a/tutorials/asr/Buffered_Transducer_Inference.ipynb +++ b/tutorials/asr/Buffered_Transducer_Inference.ipynb @@ -27,7 +27,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "# Update numba and restart (this is required to update internal numba version of Colab)\n", diff --git a/tutorials/asr/Buffered_Transducer_Inference_with_LCS_Merge.ipynb b/tutorials/asr/Buffered_Transducer_Inference_with_LCS_Merge.ipynb index 9a6b7b2380cf..eb4676b6e01f 100644 --- a/tutorials/asr/Buffered_Transducer_Inference_with_LCS_Merge.ipynb +++ 
b/tutorials/asr/Buffered_Transducer_Inference_with_LCS_Merge.ipynb @@ -45,7 +45,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "# Update numba and restart (this is required to update internal numba version of Colab)\n", diff --git a/tutorials/asr/Intro_to_Transducers.ipynb b/tutorials/asr/Intro_to_Transducers.ipynb index a82a4804ca56..d03508f52168 100644 --- a/tutorials/asr/Intro_to_Transducers.ipynb +++ b/tutorials/asr/Intro_to_Transducers.ipynb @@ -43,7 +43,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]" ], "execution_count": null, diff --git a/tutorials/asr/Multilang_ASR.ipynb b/tutorials/asr/Multilang_ASR.ipynb index 8320cc8a07c9..06dd5f3d1ef2 100644 --- a/tutorials/asr/Multilang_ASR.ipynb +++ b/tutorials/asr/Multilang_ASR.ipynb @@ -101,7 +101,7 @@ "\n", "## Install NeMo\n", "## We are using the main branch but you might want to adjust that too\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "\"\"\"\n", diff --git a/tutorials/asr/Offline_ASR.ipynb b/tutorials/asr/Offline_ASR.ipynb index 2dd4cbe9d814..6fc3862fb3a1 100644 --- a/tutorials/asr/Offline_ASR.ipynb +++ b/tutorials/asr/Offline_ASR.ipynb @@ -51,7 +51,7 @@ "id": "I9eIxAyKHREB" }, "source": [ - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "try:\n", " # Import NeMo Speech Recognition collection\n", " import nemo.collections.asr as nemo_asr\n", diff --git a/tutorials/asr/Offline_ASR_with_VAD_for_CTC_models.ipynb b/tutorials/asr/Offline_ASR_with_VAD_for_CTC_models.ipynb index 29913fe0fe73..d05503c0f1f3 100644 --- a/tutorials/asr/Offline_ASR_with_VAD_for_CTC_models.ipynb +++ b/tutorials/asr/Offline_ASR_with_VAD_for_CTC_models.ipynb @@ -22,7 +22,7 @@ "!pip install wget\n", "\n", "## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "\"\"\"\n", diff --git a/tutorials/asr/Online_ASR_Microphone_Demo.ipynb b/tutorials/asr/Online_ASR_Microphone_Demo.ipynb index 5d2f1451d1bf..751a33cdd705 100644 --- a/tutorials/asr/Online_ASR_Microphone_Demo.ipynb +++ b/tutorials/asr/Online_ASR_Microphone_Demo.ipynb @@ -26,7 +26,7 @@ "!pip install pyaudio\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n", "\n", "## Install TorchAudio\n", diff --git a/tutorials/asr/Online_Noise_Augmentation.ipynb b/tutorials/asr/Online_Noise_Augmentation.ipynb index 5756c7d58ebe..9781d965b0c1 100644 --- a/tutorials/asr/Online_Noise_Augmentation.ipynb +++ b/tutorials/asr/Online_Noise_Augmentation.ipynb @@ -31,7 +31,7 @@ "!pip install text-unidecode\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n", "\n", "## Install TorchAudio\n", diff --git a/tutorials/asr/Online_Offline_Microphone_VAD_Demo.ipynb b/tutorials/asr/Online_Offline_Microphone_VAD_Demo.ipynb index 2076bc06982b..43b7c74e1db8 100644 --- a/tutorials/asr/Online_Offline_Microphone_VAD_Demo.ipynb +++ 
b/tutorials/asr/Online_Offline_Microphone_VAD_Demo.ipynb @@ -26,7 +26,7 @@ "!pip install pyaudio\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n", "\n", "## Install TorchAudio\n", diff --git a/tutorials/asr/Online_Offline_Speech_Commands_Demo.ipynb b/tutorials/asr/Online_Offline_Speech_Commands_Demo.ipynb index 2488e46287a6..3e1f05369e48 100644 --- a/tutorials/asr/Online_Offline_Speech_Commands_Demo.ipynb +++ b/tutorials/asr/Online_Offline_Speech_Commands_Demo.ipynb @@ -28,7 +28,7 @@ "!pip install pyaudio\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n", "\n", "## Install TorchAudio\n", diff --git a/tutorials/asr/Self_Supervised_Pre_Training.ipynb b/tutorials/asr/Self_Supervised_Pre_Training.ipynb index 0f0270c1ad75..c0b0f8aff869 100644 --- a/tutorials/asr/Self_Supervised_Pre_Training.ipynb +++ b/tutorials/asr/Self_Supervised_Pre_Training.ipynb @@ -27,7 +27,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "\"\"\"\n", diff --git a/tutorials/asr/Speech_Commands.ipynb b/tutorials/asr/Speech_Commands.ipynb index 14cf1dc3812f..40ce00ae23c2 100644 --- a/tutorials/asr/Speech_Commands.ipynb +++ b/tutorials/asr/Speech_Commands.ipynb @@ -60,7 +60,7 @@ "!pip install text-unidecode\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n", "\n", "## Install TorchAudio\n", diff --git a/tutorials/asr/Streaming_ASR.ipynb b/tutorials/asr/Streaming_ASR.ipynb index 5d4d5b188e18..f4aa8d160057 100644 --- a/tutorials/asr/Streaming_ASR.ipynb +++ b/tutorials/asr/Streaming_ASR.ipynb @@ -27,7 +27,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "## Grab the config we'll use in this example\n", diff --git a/tutorials/asr/Voice_Activity_Detection.ipynb b/tutorials/asr/Voice_Activity_Detection.ipynb index f0d2ef14ce6f..8ef5322b13a2 100644 --- a/tutorials/asr/Voice_Activity_Detection.ipynb +++ b/tutorials/asr/Voice_Activity_Detection.ipynb @@ -27,7 +27,7 @@ "!pip install text-unidecode\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n", "\n", "## Install TorchAudio\n", diff --git a/tutorials/asr/asr_adapters/ASR_with_Adapters.ipynb b/tutorials/asr/asr_adapters/ASR_with_Adapters.ipynb index 468c602a8765..2fcd2f399940 100644 --- a/tutorials/asr/asr_adapters/ASR_with_Adapters.ipynb +++ b/tutorials/asr/asr_adapters/ASR_with_Adapters.ipynb @@ -51,7 +51,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "## Grab the config we'll use in this example\n", diff --git a/tutorials/nlp/01_Pretrained_Language_Models_for_Downstream_Tasks.ipynb b/tutorials/nlp/01_Pretrained_Language_Models_for_Downstream_Tasks.ipynb index 
faa93de12514..120207831ce6 100644 --- a/tutorials/nlp/01_Pretrained_Language_Models_for_Downstream_Tasks.ipynb +++ b/tutorials/nlp/01_Pretrained_Language_Models_for_Downstream_Tasks.ipynb @@ -26,7 +26,7 @@ "# If you're using Google Colab and not running locally, run this cell\n", "\n", "# install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]" ] }, diff --git a/tutorials/nlp/02_NLP_Tokenizers.ipynb b/tutorials/nlp/02_NLP_Tokenizers.ipynb index c63d2a8b1689..f6b56e0712a3 100644 --- a/tutorials/nlp/02_NLP_Tokenizers.ipynb +++ b/tutorials/nlp/02_NLP_Tokenizers.ipynb @@ -10,7 +10,7 @@ }, "outputs": [], "source": [ - "BRANCH = 'main'" + "BRANCH = 'r1.13.0'" ] }, { @@ -35,7 +35,7 @@ "# If you're using Google Colab and not running locally, run this cell\n", "\n", "# install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]" ] }, diff --git a/tutorials/nlp/Data_Preprocessing_and_Cleaning_for_NMT.ipynb b/tutorials/nlp/Data_Preprocessing_and_Cleaning_for_NMT.ipynb index 323bfa1c49b8..e535f7594f97 100644 --- a/tutorials/nlp/Data_Preprocessing_and_Cleaning_for_NMT.ipynb +++ b/tutorials/nlp/Data_Preprocessing_and_Cleaning_for_NMT.ipynb @@ -300,7 +300,7 @@ "\n", "## Install NeMo\n", "\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "!pip uninstall -y sacrebleu\n", diff --git a/tutorials/nlp/Dialogue.ipynb b/tutorials/nlp/Dialogue.ipynb index ddd3bdd4f929..8395fb4c8eb1 100644 --- a/tutorials/nlp/Dialogue.ipynb +++ b/tutorials/nlp/Dialogue.ipynb @@ -27,7 +27,7 @@ "outputs": [], "source": [ "import os \n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!apt-get update && apt-get install -y libsndfile1 ffmpeg\n", "!git clone https://github.com/NVIDIA/NeMo --branch $BRANCH\n", "os.chdir('NeMo')\n", diff --git a/tutorials/nlp/Entity_Linking_Medical.ipynb b/tutorials/nlp/Entity_Linking_Medical.ipynb index 0d7a1d5c8de5..dd41a25e5601 100644 --- a/tutorials/nlp/Entity_Linking_Medical.ipynb +++ b/tutorials/nlp/Entity_Linking_Medical.ipynb @@ -17,7 +17,7 @@ "\"\"\"\n", "\n", "## Install NeMo if using google collab or if its not installed locally\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]" ] }, diff --git a/tutorials/nlp/GLUE_Benchmark.ipynb b/tutorials/nlp/GLUE_Benchmark.ipynb index d8fe75940b09..203a278bea88 100644 --- a/tutorials/nlp/GLUE_Benchmark.ipynb +++ b/tutorials/nlp/GLUE_Benchmark.ipynb @@ -44,7 +44,7 @@ "# If you're using Google Colab and not running locally, run this cell\n", "\n", "# install NeMo\n", - "BRANCH = 'main'\n!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]\n" + "BRANCH = 'r1.13.0'\n!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]\n" ], "execution_count": null, "outputs": [] diff --git a/tutorials/nlp/Joint_Intent_and_Slot_Classification.ipynb b/tutorials/nlp/Joint_Intent_and_Slot_Classification.ipynb index 104d69df18e2..c548bdb02161 100644 --- a/tutorials/nlp/Joint_Intent_and_Slot_Classification.ipynb +++ b/tutorials/nlp/Joint_Intent_and_Slot_Classification.ipynb @@ -22,7 +22,7 @@ "# If you're using Google Colab and not running locally, run this cell\n", "\n", "# install NeMo\n", - "BRANCH = 
'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]" ] }, diff --git a/tutorials/nlp/MegatronBert_export.ipynb b/tutorials/nlp/MegatronBert_export.ipynb index f925d2bc59b0..54ad754e4617 100644 --- a/tutorials/nlp/MegatronBert_export.ipynb +++ b/tutorials/nlp/MegatronBert_export.ipynb @@ -7,7 +7,7 @@ "metadata": {}, "outputs": [], "source": [ - "BRANCH='main'" + "BRANCH='r1.13.0'" ] }, { diff --git a/tutorials/nlp/Megatron_Synthetic_Tabular_Data_Generation.ipynb b/tutorials/nlp/Megatron_Synthetic_Tabular_Data_Generation.ipynb index 3dc3d6ce192e..985fdb568042 100644 --- a/tutorials/nlp/Megatron_Synthetic_Tabular_Data_Generation.ipynb +++ b/tutorials/nlp/Megatron_Synthetic_Tabular_Data_Generation.ipynb @@ -62,7 +62,7 @@ "metadata": {}, "outputs": [], "source": [ - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "DATA_PATH='.'\n", "TRANSACTIONS=DATA_PATH+'/card_transaction.v1.csv'\n", "#CHECKPOINTS='/chk_points'\n", diff --git a/tutorials/nlp/Multitask_Prompt_and_PTuning.ipynb b/tutorials/nlp/Multitask_Prompt_and_PTuning.ipynb index b03316bfce02..5480c3c1a5b4 100644 --- a/tutorials/nlp/Multitask_Prompt_and_PTuning.ipynb +++ b/tutorials/nlp/Multitask_Prompt_and_PTuning.ipynb @@ -7,7 +7,7 @@ "metadata": {}, "outputs": [], "source": [ - "BRANCH='main'" + "BRANCH='r1.13.0'" ] }, { diff --git a/tutorials/nlp/Non_English_Downstream_Tasks_(NER).ipynb b/tutorials/nlp/Non_English_Downstream_Tasks_(NER).ipynb index bfa56e5a2567..f088f8ca4627 100644 --- a/tutorials/nlp/Non_English_Downstream_Tasks_(NER).ipynb +++ b/tutorials/nlp/Non_English_Downstream_Tasks_(NER).ipynb @@ -8,7 +8,7 @@ }, "outputs": [], "source": [ - "BRANCH = 'main'" + "BRANCH = 'r1.13.0'" ] }, { diff --git a/tutorials/nlp/Punctuation_and_Capitalization.ipynb b/tutorials/nlp/Punctuation_and_Capitalization.ipynb index 1519c234372b..aa80ebb5bd91 100644 --- a/tutorials/nlp/Punctuation_and_Capitalization.ipynb +++ b/tutorials/nlp/Punctuation_and_Capitalization.ipynb @@ -6,7 +6,7 @@ "metadata": {}, "outputs": [], "source": [ - "BRANCH = 'main'" + "BRANCH = 'r1.13.0'" ] }, { diff --git a/tutorials/nlp/Punctuation_and_Capitalization_Lexical_Audio.ipynb b/tutorials/nlp/Punctuation_and_Capitalization_Lexical_Audio.ipynb index ef8f0bd33353..20333de59b38 100644 --- a/tutorials/nlp/Punctuation_and_Capitalization_Lexical_Audio.ipynb +++ b/tutorials/nlp/Punctuation_and_Capitalization_Lexical_Audio.ipynb @@ -10,7 +10,7 @@ }, "outputs": [], "source": [ - "BRANCH = 'main'" + "BRANCH = 'r1.13.0'" ] }, { diff --git a/tutorials/nlp/Question_Answering.ipynb b/tutorials/nlp/Question_Answering.ipynb index 5ce89b3baafc..f461a5f651ef 100644 --- a/tutorials/nlp/Question_Answering.ipynb +++ b/tutorials/nlp/Question_Answering.ipynb @@ -74,7 +74,7 @@ }, "outputs": [], "source": [ - "BRANCH = 'main'" + "BRANCH = 'r1.13.0'" ] }, { diff --git a/tutorials/nlp/Relation_Extraction-BioMegatron.ipynb b/tutorials/nlp/Relation_Extraction-BioMegatron.ipynb index b7c25cb416ef..54ff9d7ccabb 100644 --- a/tutorials/nlp/Relation_Extraction-BioMegatron.ipynb +++ b/tutorials/nlp/Relation_Extraction-BioMegatron.ipynb @@ -6,7 +6,7 @@ "metadata": {}, "outputs": [], "source": [ - "BRANCH = 'main'" + "BRANCH = 'r1.13.0'" ] }, { diff --git a/tutorials/nlp/Text2Sparql.ipynb b/tutorials/nlp/Text2Sparql.ipynb index b734e72c1fc6..5b238ca27e60 100644 --- a/tutorials/nlp/Text2Sparql.ipynb +++ b/tutorials/nlp/Text2Sparql.ipynb @@ -20,7 +20,7 @@ "# If you're using Google Colab and not running locally, run this cell\n", 
"\n", "# install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]" ] }, @@ -149,7 +149,7 @@ "WORK_DIR = \"PATH_TO_CHECKPOINTS_AND_LOGS\"\n", "\n", "# NeMo Version\n", - "BRANCH = 'main'\n" + "BRANCH = 'r1.13.0'\n" ] }, { diff --git a/tutorials/nlp/Text_Classification_Sentiment_Analysis.ipynb b/tutorials/nlp/Text_Classification_Sentiment_Analysis.ipynb index 5b5b74e7bf11..b38f23002b6e 100644 --- a/tutorials/nlp/Text_Classification_Sentiment_Analysis.ipynb +++ b/tutorials/nlp/Text_Classification_Sentiment_Analysis.ipynb @@ -20,7 +20,7 @@ "# If you're using Google Colab and not running locally, run this cell\n", "\n", "# install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]\n", "\n" ] diff --git a/tutorials/nlp/Token_Classification-BioMegatron.ipynb b/tutorials/nlp/Token_Classification-BioMegatron.ipynb index b07dfb061625..304befe44a14 100644 --- a/tutorials/nlp/Token_Classification-BioMegatron.ipynb +++ b/tutorials/nlp/Token_Classification-BioMegatron.ipynb @@ -7,7 +7,7 @@ "metadata": {}, "outputs": [], "source": [ - "BRANCH='main'" + "BRANCH='r1.13.0'" ] }, { diff --git a/tutorials/nlp/Token_Classification_Named_Entity_Recognition.ipynb b/tutorials/nlp/Token_Classification_Named_Entity_Recognition.ipynb index 0e8fadde8041..bc41c8568844 100644 --- a/tutorials/nlp/Token_Classification_Named_Entity_Recognition.ipynb +++ b/tutorials/nlp/Token_Classification_Named_Entity_Recognition.ipynb @@ -30,7 +30,7 @@ "metadata": {}, "outputs": [], "source": [ - "BRANCH = 'main'" + "BRANCH = 'r1.13.0'" ] }, { @@ -53,7 +53,7 @@ "# If you're using Google Colab and not running locally, run this cell\n", "\n", "# install NeMo\n", - "BRANCH = 'main'\n!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]\n" + "BRANCH = 'r1.13.0'\n!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]\n" ], "execution_count": null, "outputs": [] diff --git a/tutorials/nlp/Zero_Shot_Intent_Recognition.ipynb b/tutorials/nlp/Zero_Shot_Intent_Recognition.ipynb index 69df7b27b02d..05706014b9ba 100644 --- a/tutorials/nlp/Zero_Shot_Intent_Recognition.ipynb +++ b/tutorials/nlp/Zero_Shot_Intent_Recognition.ipynb @@ -22,7 +22,7 @@ "# If you're using Google Colab and not running locally, run this cell\n", "\n", "# install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]" ] }, diff --git a/tutorials/speaker_tasks/ASR_with_SpeakerDiarization.ipynb b/tutorials/speaker_tasks/ASR_with_SpeakerDiarization.ipynb index 6ba29f2ce484..96a08f4ab412 100644 --- a/tutorials/speaker_tasks/ASR_with_SpeakerDiarization.ipynb +++ b/tutorials/speaker_tasks/ASR_with_SpeakerDiarization.ipynb @@ -30,7 +30,7 @@ "!pip install text-unidecode\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n", "\n", "## Install TorchAudio\n", diff --git a/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb b/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb index fd870db6ac0a..c5e1eb3fb95f 100644 --- a/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb +++ b/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb @@ -23,7 +23,7 @@ "!pip install 
text-unidecode\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n", "\n", "## Install TorchAudio\n", diff --git a/tutorials/speaker_tasks/Speaker_Diarization_Training.ipynb b/tutorials/speaker_tasks/Speaker_Diarization_Training.ipynb index 8d67b54b17de..77041398edb5 100644 --- a/tutorials/speaker_tasks/Speaker_Diarization_Training.ipynb +++ b/tutorials/speaker_tasks/Speaker_Diarization_Training.ipynb @@ -18,7 +18,7 @@ "\"\"\"\n", "\n", "NEMO_DIR_PATH = \"NeMo\"\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "\n", "! git clone https://github.com/NVIDIA/NeMo\n", "%cd NeMo\n", diff --git a/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb b/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb index 8e3ae9c1f131..0b19f83bbcd8 100644 --- a/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb +++ b/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb @@ -27,7 +27,7 @@ "!pip install text-unidecode\n", "\n", "## Install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n", "\n", "# Install TorchAudio\n", diff --git a/tutorials/text_processing/ITN_with_Thutmose_Tagger.ipynb b/tutorials/text_processing/ITN_with_Thutmose_Tagger.ipynb index b72cee51003b..dcf944769e9f 100644 --- a/tutorials/text_processing/ITN_with_Thutmose_Tagger.ipynb +++ b/tutorials/text_processing/ITN_with_Thutmose_Tagger.ipynb @@ -21,7 +21,7 @@ "import os\n", "\n", "# install NeMo\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "\n", "GITHUB_ACCOUNT = 'NVIDIA' # change this if using a fork\n", "\n", diff --git a/tutorials/text_processing/Text_(Inverse)_Normalization.ipynb b/tutorials/text_processing/Text_(Inverse)_Normalization.ipynb index 596523b41c0a..e00dfc9463de 100644 --- a/tutorials/text_processing/Text_(Inverse)_Normalization.ipynb +++ b/tutorials/text_processing/Text_(Inverse)_Normalization.ipynb @@ -60,7 +60,7 @@ "outputs": [], "source": [ "## Install NeMo, which installs both nemo and nemo_text_processing package\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]\n", "\n", "# install Pynini for text normalization\n", diff --git a/tutorials/text_processing/WFST_Tutorial.ipynb b/tutorials/text_processing/WFST_Tutorial.ipynb index ed7127241dd5..51daded0b796 100644 --- a/tutorials/text_processing/WFST_Tutorial.ipynb +++ b/tutorials/text_processing/WFST_Tutorial.ipynb @@ -39,7 +39,7 @@ "outputs": [], "source": [ "## Install NeMo, which installs both nemo and nemo_text_processing package\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nemo_text_processing]\n", "\n", "# install Pynini for text normalization\n", diff --git a/tutorials/tools/CTC_Segmentation_Tutorial.ipynb b/tutorials/tools/CTC_Segmentation_Tutorial.ipynb index d22258885db8..25f63da12df9 100644 --- a/tutorials/tools/CTC_Segmentation_Tutorial.ipynb +++ b/tutorials/tools/CTC_Segmentation_Tutorial.ipynb @@ -35,7 +35,7 @@ "id": "d4KCUoxSpdoZ" }, "source": [ - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "\n", "\"\"\"\n", "You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.\n", diff --git a/tutorials/tools/Multispeaker_Simulator.ipynb 
b/tutorials/tools/Multispeaker_Simulator.ipynb index cf2993254e76..e8d8d1b09f89 100644 --- a/tutorials/tools/Multispeaker_Simulator.ipynb +++ b/tutorials/tools/Multispeaker_Simulator.ipynb @@ -18,7 +18,7 @@ "\"\"\"\n", "\n", "NEMO_DIR_PATH = \"NeMo\"\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "\n", "! git clone https://github.com/NVIDIA/NeMo\n", "%cd NeMo\n", diff --git a/tutorials/tts/Aligner_Inference_Examples.ipynb b/tutorials/tts/Aligner_Inference_Examples.ipynb index f6acbfa2c0d4..d32305579166 100644 --- a/tutorials/tts/Aligner_Inference_Examples.ipynb +++ b/tutorials/tts/Aligner_Inference_Examples.ipynb @@ -39,7 +39,7 @@ "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", "4. Run this cell to set up dependencies.\n", "\"\"\"\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "# # If you're using Colab and not running locally, uncomment and run this cell.\n", "# !apt-get install sox libsndfile1 ffmpeg\n", "# !pip install wget text-unidecode\n", diff --git a/tutorials/tts/FastPitch_Finetuning.ipynb b/tutorials/tts/FastPitch_Finetuning.ipynb index 47ead32fbd65..039c87064879 100755 --- a/tutorials/tts/FastPitch_Finetuning.ipynb +++ b/tutorials/tts/FastPitch_Finetuning.ipynb @@ -57,7 +57,7 @@ "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", "4. Run this cell to set up dependencies.\n", "\"\"\"\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "# # If you're using Google Colab and not running locally, uncomment and run this cell.\n", "# !apt-get install sox libsndfile1 ffmpeg\n", "# !pip install wget text-unidecode pynini==2.1.4\n", diff --git a/tutorials/tts/FastPitch_GermanTTS_Training.ipynb b/tutorials/tts/FastPitch_GermanTTS_Training.ipynb index 145e7de2d43b..5dafd96c0dc6 100644 --- a/tutorials/tts/FastPitch_GermanTTS_Training.ipynb +++ b/tutorials/tts/FastPitch_GermanTTS_Training.ipynb @@ -51,7 +51,7 @@ "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", "4. Run this cell to set up dependencies# .\n", "\"\"\"\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "# # If you're using Colab and not running locally, uncomment and run this cell.\n", "# !apt-get install sox libsndfile1 ffmpeg\n", "# !pip install wget text-unidecode pynini==2.1.4 scipy==1.7.3\n", diff --git a/tutorials/tts/FastPitch_MixerTTS_Training.ipynb b/tutorials/tts/FastPitch_MixerTTS_Training.ipynb index 1dfd14ed2972..8fefad8782ea 100644 --- a/tutorials/tts/FastPitch_MixerTTS_Training.ipynb +++ b/tutorials/tts/FastPitch_MixerTTS_Training.ipynb @@ -50,7 +50,7 @@ "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", "4. Run this cell to set up dependencies# .\n", "\"\"\"\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "# # If you're using Colab and not running locally, uncomment and run this cell.\n", "# !apt-get install sox libsndfile1 ffmpeg\n", "# !pip install wget text-unidecode pynini==2.1.4 scipy==1.7.3\n", diff --git a/tutorials/tts/FastPitch_Speaker_Interpolation.ipynb b/tutorials/tts/FastPitch_Speaker_Interpolation.ipynb index eda5bba0aa1e..7f55c3d2e12f 100644 --- a/tutorials/tts/FastPitch_Speaker_Interpolation.ipynb +++ b/tutorials/tts/FastPitch_Speaker_Interpolation.ipynb @@ -94,7 +94,7 @@ "source": [ "# Install NeMo library. 
If you are running locally (rather than on Google Colab), comment out the below lines\n", "# and instead follow the instructions at https://github.com/NVIDIA/NeMo#Installation\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]" ] }, diff --git a/tutorials/tts/Inference_DurationPitchControl.ipynb b/tutorials/tts/Inference_DurationPitchControl.ipynb index c4879f38274c..59a01c628449 100644 --- a/tutorials/tts/Inference_DurationPitchControl.ipynb +++ b/tutorials/tts/Inference_DurationPitchControl.ipynb @@ -46,7 +46,7 @@ "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", "4. Run this cell to set up dependencies.\n", "\"\"\"\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "# # If you're using Google Colab and not running locally, uncomment and run this cell.\n", "# !apt-get install sox libsndfile1 ffmpeg\n", "# !pip install wget text-unidecode\n", diff --git a/tutorials/tts/Inference_ModelSelect.ipynb b/tutorials/tts/Inference_ModelSelect.ipynb index 8fe398edafa6..71067530b311 100644 --- a/tutorials/tts/Inference_ModelSelect.ipynb +++ b/tutorials/tts/Inference_ModelSelect.ipynb @@ -46,7 +46,7 @@ "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", "4. Run this cell to set up dependencies.\n", "\"\"\"\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "# # If you're using Google Colab and not running locally, uncomment and run this cell.\n", "# !apt-get install sox libsndfile1 ffmpeg\n", "# !pip install wget text-unidecode\n", diff --git a/tutorials/tts/NeMo_TTS_Primer.ipynb b/tutorials/tts/NeMo_TTS_Primer.ipynb index c904791ff228..054fdca86259 100644 --- a/tutorials/tts/NeMo_TTS_Primer.ipynb +++ b/tutorials/tts/NeMo_TTS_Primer.ipynb @@ -25,7 +25,7 @@ "source": [ "# Install NeMo library. If you are running locally (rather than on Google Colab), comment out the below lines\n", "# and instead follow the instructions at https://github.com/NVIDIA/NeMo#Installation\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]" ] }, diff --git a/tutorials/tts/Tacotron2_Training.ipynb b/tutorials/tts/Tacotron2_Training.ipynb index 99ba165f7287..db93e46592c1 100644 --- a/tutorials/tts/Tacotron2_Training.ipynb +++ b/tutorials/tts/Tacotron2_Training.ipynb @@ -54,7 +54,7 @@ "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", "4. 
Run this cell to set up dependencies# .\n", "\"\"\"\n", - "BRANCH = 'main'\n", + "BRANCH = 'r1.13.0'\n", "# # If you're using Colab and not running locally, uncomment and run this cell.\n", "# !apt-get install sox libsndfile1 ffmpeg\n", "# !pip install wget text-unidecode\n", From 15a53ece24b15d12c7bf775f1ab1be97603dfc4c Mon Sep 17 00:00:00 2001 From: Jocelyn Date: Wed, 26 Oct 2022 09:52:12 -0700 Subject: [PATCH 147/244] Fix link to inference notebook (#5247) Signed-off-by: Jocelyn Huang Signed-off-by: Jocelyn Huang --- tutorials/tts/NeMo_TTS_Primer.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tutorials/tts/NeMo_TTS_Primer.ipynb b/tutorials/tts/NeMo_TTS_Primer.ipynb index 054fdca86259..6b9ec79a53f1 100644 --- a/tutorials/tts/NeMo_TTS_Primer.ipynb +++ b/tutorials/tts/NeMo_TTS_Primer.ipynb @@ -1991,7 +1991,7 @@ "\n", "To get more hands on experience with NeMo TTS, look through some of our other [tutorials](https://github.com/NVIDIA/NeMo/tree/main/tutorials/tts).\n", "\n", - "* Running pretrained models: [Inference_ModelSelect](https://github.com/NVIDIA/NeMo/blob/main)\n", + "* Running pretrained models: [Inference_ModelSelect](https://github.com/NVIDIA/NeMo/blob/main/tutorials/tts/Inference_ModelSelect.ipynb)\n", "* FastPitch [training](https://github.com/NVIDIA/NeMo/blob/main/tutorials/tts/FastPitch_MixerTTS_Training.ipynb) and [fine-tuning](https://github.com/NVIDIA/NeMo/blob/main/tutorials/tts/FastPitch_Finetuning.ipynb)\n", "\n", "To learn how to deploy and serve your TTS models, visit [Riva](https://docs.nvidia.com/deeplearning/riva/index.html)." @@ -2058,4 +2058,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} From 16800d4ff0625fd3fb3a5b16a2d31231d77ef44b Mon Sep 17 00:00:00 2001 From: Somshubra Majumdar Date: Wed, 26 Oct 2022 19:06:35 -0700 Subject: [PATCH 148/244] Update ASR scores table (#5254) Signed-off-by: smajumdar Signed-off-by: smajumdar --- .../asr/data/scores/be/conformer_be.csv | 3 ++ .../asr/data/scores/ca/conformer_ca.csv | 6 +-- .../asr/data/scores/ca/quartznet15x5_ca.csv | 4 +- .../asr/data/scores/en/conformer_en.csv | 4 +- .../asr/data/scores/en/squeezeformer_en.csv | 7 +++ .../asr/data/scores/enes/conformer_enes.csv | 8 ++-- .../asr/data/scores/enes/contextnet_enes.csv | 4 +- .../asr/data/scores/hr/conformer_hr.csv | 2 +- .../asr/data/scores/rw/conformer_rw.csv | 4 +- docs/source/asr/scores.rst | 44 ++++++++++++++++++- 10 files changed, 69 insertions(+), 17 deletions(-) create mode 100644 docs/source/asr/data/scores/be/conformer_be.csv create mode 100644 docs/source/asr/data/scores/en/squeezeformer_en.csv diff --git a/docs/source/asr/data/scores/be/conformer_be.csv b/docs/source/asr/data/scores/be/conformer_be.csv new file mode 100644 index 000000000000..12fcfe0e554b --- /dev/null +++ b/docs/source/asr/data/scores/be/conformer_be.csv @@ -0,0 +1,3 @@ +Model Name,Language,MCV Test-Set v10 (be) +stt_be_conformer_ctc_large,be,4.7 % +stt_be_conformer_transducer_large,be,3.8 % diff --git a/docs/source/asr/data/scores/ca/conformer_ca.csv b/docs/source/asr/data/scores/ca/conformer_ca.csv index a9c139354738..bc30b90a25b4 100644 --- a/docs/source/asr/data/scores/ca/conformer_ca.csv +++ b/docs/source/asr/data/scores/ca/conformer_ca.csv @@ -1,3 +1,3 @@ -Model Name,Language,MCV Test-Set v9.0 (ca) -stt_ca_conformer_ctc_large,ca,4.27 -stt_ca_conformer_transducer_large,ca,3.85 \ No newline at end of file +Model Name,Language,MCV Dev-Set (v??) 
(ca),MCV Dev-Set v9.0 (ca),MCV Test-Set v9.0 (ca) +stt_ca_conformer_ctc_large,ca,,4.70,4.27 +stt_ca_conformer_transducer_large,ca,,4.43,3.85 diff --git a/docs/source/asr/data/scores/ca/quartznet15x5_ca.csv b/docs/source/asr/data/scores/ca/quartznet15x5_ca.csv index 1082d5c3d749..6b826662e25e 100644 --- a/docs/source/asr/data/scores/ca/quartznet15x5_ca.csv +++ b/docs/source/asr/data/scores/ca/quartznet15x5_ca.csv @@ -1,2 +1,2 @@ -Model Name,Language,MCV Dev-Set (v??) (ca) -stt_ca_quartznet15x5,ca,6.0 +Model Name,Language,MCV Dev-Set (v??) (ca),MCV Dev-Set v9.0 (ca),MCV Test-Set v9.0 (ca) +stt_ca_quartznet15x5,ca,6.0,, diff --git a/docs/source/asr/data/scores/en/conformer_en.csv b/docs/source/asr/data/scores/en/conformer_en.csv index 57a8ad69b0b2..23ec44382578 100644 --- a/docs/source/asr/data/scores/en/conformer_en.csv +++ b/docs/source/asr/data/scores/en/conformer_en.csv @@ -1,14 +1,14 @@ Model Name,Language,Librispeech Dev-Clean,Librispeech Dev-Other,Librispeech Test-Clean,Librispeech Test-Other,MCV Test-Set v8.0 (en),MLS Dev (en),MLS Test (en),NSC Part1,NSC Part6,Peoples Speech Test v1,SLR 83 Test,WSJ Dev 93,WSJ Eval 92 stt_en_conformer_ctc_small,en,3.6,8.1,3.7,8.1,,,,,,,,, stt_en_conformer_ctc_medium,en,2.5,5.8,2.6,5.9,,,,,,,,, -stt_en_conformer_ctc_large,en,2.0,4.4,2.1,4.3,,,,,,,,, +stt_en_conformer_ctc_large,en,1.9,4.4,2.1,4.5,,,,,,,,, stt_en_conformer_ctc_xlarge,en,1.77 %,3.79 %,2.00 %,3.74 %,7.88 %,,5.99 %,,6.44 %,22.90 %,5.50 %,2.36 %, stt_en_conformer_ctc_small_ls,en,3.3,8.8,3.4,8.8,,,,,,,,, stt_en_conformer_ctc_medium_ls,en,2.7,7.4,3.0,7.3,,,,,,,,, stt_en_conformer_ctc_large_ls,en,2.4,6.2,2.7,6.0,,,,,,,,, stt_en_conformer_transducer_small,en,2.8,6.6,2.5,6.6,,,,,,,,, stt_en_conformer_transducer_medium,en,2.0,4.6,2.1,4.7,,,,,,,,, -stt_en_conformer_transducer_large,en,1.5,3.5,1.7,3.6,,,,,,,,, +stt_en_conformer_transducer_large,en,1.6,3.5,1.7,3.7,,,,,,,,, stt_en_conformer_transducer_large_ls,en,2.1,5.0,2.3,5.1,,,,,,,,, stt_en_conformer_transducer_xlarge,en,1.48 %,2.95 %,1.62 %,3.01 %,6.46 %,4.59 %,5.32 %,5.70 %,6.47 %,21.32 %,,2.05 %,1.17 % stt_en_conformer_transducer_xxlarge,en,1.52 %,3.09 %,1.72 %,3.14 %,,5.29 %,5.85 %,6.64 %,,,,2.42 %,1.49 % diff --git a/docs/source/asr/data/scores/en/squeezeformer_en.csv b/docs/source/asr/data/scores/en/squeezeformer_en.csv new file mode 100644 index 000000000000..fdbd9bd99665 --- /dev/null +++ b/docs/source/asr/data/scores/en/squeezeformer_en.csv @@ -0,0 +1,7 @@ +Model Name,Language,Librispeech Dev-Clean,Librispeech Dev-Other,Librispeech Test-Clean,Librispeech Test-Other,MCV Test-Set v8.0 (en),MLS Dev (en),MLS Test (en),NSC Part1,NSC Part6,Peoples Speech Test v1,SLR 83 Test,WSJ Dev 93,WSJ Eval 92 +stt_en_squeezeformer_ctc_xsmall_ls,en,3.6 %,9.7 %,3.8 %,9.4 %,,,,,,,,, +stt_en_squeezeformer_ctc_small_ls,en,2.9 %,7.4 %,3.1 %,7.4 %,,,,,,,,, +stt_en_squeezeformer_ctc_small_medium_ls,en,2.7 %,7.0 %,2.8 %,7.1 %,,,,,,,,, +stt_en_squeezeformer_ctc_medium_ls,en,2.4 %,6.2 %,2.6 %,6.3 %,,,,,,,,, +stt_en_squeezeformer_ctc_medium_large_ls,en,2.3 %,6.0 %,2.5 %,5.9 %,,,,,,,,, +stt_en_squeezeformer_ctc_large_ls,en,2.3 %,5.7 %,2.4 %,5.7 %,,,,,,,,, diff --git a/docs/source/asr/data/scores/enes/conformer_enes.csv b/docs/source/asr/data/scores/enes/conformer_enes.csv index 9e3cad59944c..983e664d4de1 100644 --- a/docs/source/asr/data/scores/enes/conformer_enes.csv +++ b/docs/source/asr/data/scores/enes/conformer_enes.csv @@ -1,3 +1,5 @@ -Model Name,Language,Fisher-Dev-Es,Librispeech Dev-Clean,Librispeech Dev-Other,Librispeech Test-Clean,Librispeech Test-Other,MCV 
Dev-Set v7.0 (en),MLS Dev (es),VoxPopuli Dev (es) -stt_enes_conformer_ctc_large,enes,16.7 %,2.2 %,5.5 %,2.6 %,5.5 %,5.8 %,3.5 %,5.7 % -stt_enes_conformer_transducer_large,enes,16.2 %,2.0 %,4.6 %,2.2 %,4.6 %,5.0 %,3.3 %,5.3 % +Model Name,Language,Fisher-Dev-En,Fisher-Dev-Es,Fisher-Test-En,Fisher-Test-Es,Librispeech Dev-Clean,Librispeech Dev-Other,Librispeech Test-Clean,Librispeech Test-Other,MCV Dev-Set v7.0 (en),MCV Dev-Set v7.0 (es),MCV Test-Set v7.0 (en),MCV Test-Set v7.0 (es),MLS Dev (en),MLS Dev (es),MLS Test (en),MLS Test (es),VoxPopuli Dev (en),VoxPopuli Dev (es),VoxPopuli Test (en),VoxPopuli Test (es) +stt_enes_conformer_ctc_large,enes,,16.7 %,,,2.2 %,5.5 %,2.6 %,5.5 %,5.8 %,,,,,3.5 %,,,,5.7 %,, +stt_enes_conformer_ctc_large_codesw,enes,,16.51 %,,16.31 %,2.22 %,5.36 %,2.55 %,5.38 %,,5.00 %,,5.51 %,,3.46 %,,3.73 %,,5.58 %,,6.63 % +stt_enes_conformer_transducer_large,enes,,16.2 %,,,2.0 %,4.6 %,2.2 %,4.6 %,5.0 %,,,,,3.3 %,,,,5.3 %,, +stt_enes_conformer_transducer_large_codesw,enes,15.70 %,,15.66 %,,1.97 %,4.54 %,2.17 %,4.53 %,4.51 %,,5.06 %,,3.27 %,,3.67 %,,5.28 %,,6.54 %, diff --git a/docs/source/asr/data/scores/enes/contextnet_enes.csv b/docs/source/asr/data/scores/enes/contextnet_enes.csv index 694820ac1b88..72a895303bbb 100644 --- a/docs/source/asr/data/scores/enes/contextnet_enes.csv +++ b/docs/source/asr/data/scores/enes/contextnet_enes.csv @@ -1,2 +1,2 @@ -Model Name,Language,Fisher-Dev-Es,Librispeech Dev-Clean,Librispeech Dev-Other,Librispeech Test-Clean,Librispeech Test-Other,MCV Dev-Set v7.0 (en),MLS Dev (es),VoxPopuli Dev (es) -stt_enes_contextnet_large,enes,14.8 %,2.2 %,5.6 %,2.3 %,5.5 %,4.7 %,3.0 %,5.0 % +Model Name,Language,Fisher-Dev-En,Fisher-Dev-Es,Fisher-Test-En,Fisher-Test-Es,Librispeech Dev-Clean,Librispeech Dev-Other,Librispeech Test-Clean,Librispeech Test-Other,MCV Dev-Set v7.0 (en),MCV Dev-Set v7.0 (es),MCV Test-Set v7.0 (en),MCV Test-Set v7.0 (es),MLS Dev (en),MLS Dev (es),MLS Test (en),MLS Test (es),VoxPopuli Dev (en),VoxPopuli Dev (es),VoxPopuli Test (en),VoxPopuli Test (es) +stt_enes_contextnet_large,enes,,14.8 %,,,2.2 %,5.6 %,2.3 %,5.5 %,4.7 %,,,,,3.0 %,,,,5.0 %,, diff --git a/docs/source/asr/data/scores/hr/conformer_hr.csv b/docs/source/asr/data/scores/hr/conformer_hr.csv index 9c8128534b2f..04383a14e888 100644 --- a/docs/source/asr/data/scores/hr/conformer_hr.csv +++ b/docs/source/asr/data/scores/hr/conformer_hr.csv @@ -1,3 +1,3 @@ -Model Name,Language,ParlaSpeech-HR v1.0 (dev),ParlaSpeech-HR v1.0 (test) +Model Name,Language,ParlaSpeech Dev-Set v1.0 (hr),ParlaSpeech Test-Set v1.0 (hr) stt_hr_conformer_ctc_large,hr,4.43,4.70 stt_hr_conformer_transducer_large,hr,4.56,4.69 diff --git a/docs/source/asr/data/scores/rw/conformer_rw.csv b/docs/source/asr/data/scores/rw/conformer_rw.csv index 52196a54335f..e5544a8067d5 100644 --- a/docs/source/asr/data/scores/rw/conformer_rw.csv +++ b/docs/source/asr/data/scores/rw/conformer_rw.csv @@ -1,3 +1,3 @@ Model Name,Language,MCV Test-Set v9.0 (rw) -stt_rw_conformer_ctc_large,rw,18.22 -stt_rw_conformer_transducer_large,rw,16.19 \ No newline at end of file +stt_rw_conformer_ctc_large,rw,18.2 % +stt_rw_conformer_transducer_large,rw,16.2 % diff --git a/docs/source/asr/scores.rst b/docs/source/asr/scores.rst index 9f436ffccb35..2f75ae0a3db8 100644 --- a/docs/source/asr/scores.rst +++ b/docs/source/asr/scores.rst @@ -42,16 +42,26 @@ EN -------------------- -CA +.. csv-table:: + :header-rows: 1 + :align: left + :file: data/scores/en/squeezeformer_en.csv + +-------------------- + +BE ^^ .. 
csv-table:: :header-rows: 1 :align: left - :file: data/scores/ca/quartznet15x5_ca.csv + :file: data/scores/be/conformer_be.csv -------------------- +CA +^^ + .. csv-table:: :header-rows: 1 :align: left @@ -59,6 +69,13 @@ CA -------------------- +.. csv-table:: + :header-rows: 1 + :align: left + :file: data/scores/ca/quartznet15x5_ca.csv + +-------------------- + DE ^^ @@ -189,6 +206,16 @@ IT -------------------- +KAB +^^^ + +.. csv-table:: + :header-rows: 1 + :align: left + :file: data/scores/kab/conformer_kab.csv + +-------------------- + PL ^^ @@ -209,6 +236,16 @@ RU -------------------- +RW +^^ + +.. csv-table:: + :header-rows: 1 + :align: left + :file: data/scores/rw/conformer_rw.csv + +-------------------- + ZH ^^ @@ -223,3 +260,6 @@ ZH :header-rows: 1 :align: left :file: data/scores/zh/conformer_zh.csv + +-------------------- + From 2612f4813949543aa0ca7ce8cdbd0643722e243b Mon Sep 17 00:00:00 2001 From: Sean Naren Date: Thu, 27 Oct 2022 17:27:49 +0100 Subject: [PATCH 149/244] Fix links to speaker identification notebook (#5260) Signed-off-by: SeanNaren Signed-off-by: SeanNaren --- examples/speaker_tasks/recognition/README.md | 4 ++-- examples/speaker_tasks/recognition/speaker_reco.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/speaker_tasks/recognition/README.md b/examples/speaker_tasks/recognition/README.md index f6c84e6e68c6..0e0f5ae3b4fc 100644 --- a/examples/speaker_tasks/recognition/README.md +++ b/examples/speaker_tasks/recognition/README.md @@ -29,14 +29,14 @@ For training ecapa_tdnn (channel-attention) model: ```bash python speaker_reco.py --config_path='conf' --config_name='ecapa_tdnn.yaml' ``` -For step by step tutorial see [notebook](https://github.com/NVIDIA/NeMo/blob/main/tutorials/speaker_tasks/Speaker_Recognition_Verification.ipynb). +For step by step tutorial see [notebook](https://github.com/NVIDIA/NeMo/blob/main/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb). ### Fine Tuning For fine tuning on a pretrained .nemo speaker recognition model, ```bash python speaker_reco_finetune.py --config_path='conf' --config_name='titanet-finetune.yaml' ``` -for fine tuning tips see this [tutorial](https://github.com/NVIDIA/NeMo/blob/main/tutorials/speaker_tasks/Speaker_Recognition_Verification.ipynb) +for fine tuning tips see this [tutorial](https://github.com/NVIDIA/NeMo/blob/main/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb) ## Inference We provide generic scripts for manifest file creation, embedding extraction, Voxceleb evaluation and speaker ID inference. Hence most of the steps would be common and differ slightly based on your end application. diff --git a/examples/speaker_tasks/recognition/speaker_reco.py b/examples/speaker_tasks/recognition/speaker_reco.py index f1d58cb11268..f6ec557cf868 100644 --- a/examples/speaker_tasks/recognition/speaker_reco.py +++ b/examples/speaker_tasks/recognition/speaker_reco.py @@ -37,7 +37,7 @@ exp_manager.name=$EXP_NAME +exp_manager.use_datetime_version=False \ exp_manager.exp_dir='./speaker_exps' -See https://github.com/NVIDIA/NeMo/blob/main/tutorials/speaker_tasks/Speaker_Recognition_Verification.ipynb for notebook tutorial +See https://github.com/NVIDIA/NeMo/blob/main/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb for notebook tutorial Optional: Use tarred dataset to speech up data loading. Prepare ONE manifest that contains all training data you would like to include. Validation should use non-tarred dataset. 
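For readers who want to try one of these trained speaker checkpoints directly, here is a minimal sketch of extracting a speaker embedding. It assumes a working NeMo install; the `titanet_large` checkpoint name and the `audio.wav` path are illustrative placeholders, not something fixed by this patch.

```python
# Minimal sketch: speaker embedding extraction with a pretrained NeMo model.
# Assumes NeMo is installed and "audio.wav" is a 16 kHz mono speech clip.
import nemo.collections.asr as nemo_asr

speaker_model = nemo_asr.models.EncDecSpeakerLabelModel.from_pretrained(
    model_name="titanet_large"  # illustrative pretrained checkpoint name
)

# get_embedding returns a fixed-size vector characterizing the speaker;
# two clips from the same speaker should yield nearby vectors.
embedding = speaker_model.get_embedding("audio.wav")
print(embedding.shape)  # e.g. torch.Size([1, 192]) for TitaNet-Large
```

Cosine similarity between two such embeddings is the usual verification score; the Speaker_Identification_Verification notebook linked above walks through that end to end.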
From ead8cc4ef59bb68e435dffca1646dfe3a6bb4399 Mon Sep 17 00:00:00 2001 From: Jocelyn Date: Fri, 28 Oct 2022 10:11:55 -0700 Subject: [PATCH 150/244] Minor typo fixes in TTS tutorial (#5266) Signed-off-by: Jocelyn Huang Signed-off-by: Jocelyn Huang --- tutorials/tts/NeMo_TTS_Primer.ipynb | 285 ++++++++++++++-------------- 1 file changed, 147 insertions(+), 138 deletions(-) diff --git a/tutorials/tts/NeMo_TTS_Primer.ipynb b/tutorials/tts/NeMo_TTS_Primer.ipynb index 6b9ec79a53f1..0580d061d7fa 100644 --- a/tutorials/tts/NeMo_TTS_Primer.ipynb +++ b/tutorials/tts/NeMo_TTS_Primer.ipynb @@ -214,7 +214,7 @@ " \n", "\n", "\n", - "The above examples may be slightly different than the output of the NeMo text normalization code. More details on NeMo text normalization can be found in the our [TN documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/text_normalization/intro.html).\n", + "The above examples may be slightly different than the output of the NeMo text normalization code. More details on NeMo text normalization can be found in the [TN documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/text_normalization/intro.html).\n", "\n", "A more comprehensive list of text normalization rules, examples, and languages are available in the [code](https://github.com/NVIDIA/NeMo/tree/main/nemo_text_processing/text_normalization).\n", "\n" @@ -301,8 +301,7 @@ "source": [ "Today text normalization is typically a very manual process involving lots of rules, heuristics, and regular expressions.\n", "\n", - "It is difficult to train a machine learning model to automate this step due to lack of labeled data. To get ground truth data one would need to manually annotate the entire dataset. The resulting model would then have strictly worse performance than the the manual system producing the labels, making it better to use the original labeling system rather than the model.\n", - "\n" + "It is difficult to train a machine learning model to automate this step due to lack of labeled data. To get ground truth data one would need to manually annotate the entire dataset. The resulting model would then have strictly worse performance than the manual system producing the labels, making it better to use the original labeling system rather than the model." ] }, { @@ -348,7 +347,7 @@ "\n", "For example (using [ARPABET](https://en.wikipedia.org/wiki/ARPABET)): *Hello World → HH, AH0, L, OW1, ,W, ER1, L, D*\n", "\n", - "Some languages, such as Spanish and German, are *phonetic*, meaning their written characters/graphemes are always pronounced the same. For such languages G2P is unnecesary.\n", + "Some languages, such as Spanish and German, are *phonetic*, meaning their written characters/graphemes are always pronounced the same. 
For such languages G2P is unnecessary.\n", "\n", "However English is not Phonetic because:\n", "* Characters change pronunciation depending on what word they are in.\n", @@ -622,7 +621,7 @@ "\n", "Most of the earlier descriptions about Text Normalization are also the same for G2P, in regards to it being difficult to get labeled data to train a machine learning model to do it automatically and challenging to generalize and scale across languages.\n", "\n", - "The most common way that G2P is done today is to to hardcode the grapheme to phoneme mapping for all common words in a language in a **pronouncing dictionary**.\n", + "The most common way that G2P is done today is to hardcode the grapheme to phoneme mapping for all common words in a language in a **pronouncing dictionary**.\n", "\n", "A few examples of dictionary entries:\n", "```\n", @@ -745,15 +744,15 @@ }, { "cell_type": "markdown", - "source": [ - "## 7.1 Audio" - ], "metadata": { "id": "_yo7Ru_GMA0E", "pycharm": { "name": "#%% md\n" } - } + }, + "source": [ + "## 7.1 Audio" + ] }, { "cell_type": "markdown", @@ -854,7 +853,7 @@ "\n", "With 2 dimensions we can effectively use **CNNs** by running [temporal convolutions](https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html?highlight=conv1d#torch.nn.Conv1d) over the time dimension. Or by applying [2d convolutions](https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html?highlight=conv2d#torch.nn.Conv2d) to the spectrogram exactly as if it were an image in computer vision.\n", "\n", - "**Transformers** require computation/memory that is proportional to the length of the sequence squared. This means we can easily use large transformers for relatively short sequences like in NLP, smaller transformers for longer sequences like spectrogram data, and are inpractical to use on very long sequences like audio samples." + "**Transformers** require computation/memory that is proportional to the length of the sequence squared. This means we can easily use large transformers for relatively short sequences like in NLP, smaller transformers for longer sequences like spectrogram data, and are impractical to use on very long sequences like audio samples." ] }, { @@ -880,7 +879,7 @@ "source": [ "Before we go into the details of how this works, let's go through an end-to-end text to audio example so we can visualize what our model inputs and outputs look and sound like.\n", "\n", - "To do this, we will need to use both the spectrogram and vocoder models together. The vocoder will be looked at more throughly in the *audio synthesis* section." + "To do this, we will need to use both the spectrogram and vocoder models together. The vocoder will be looked at more thoroughly in the *audio synthesis* section." ] }, { @@ -1039,15 +1038,15 @@ }, { "cell_type": "markdown", - "source": [ - "### 7.6.1 Tacotron 2" - ], "metadata": { "id": "lGVKcJp6Y7Kv", "pycharm": { "name": "#%% md\n" } - } + }, + "source": [ + "### 7.6.1 Tacotron 2" + ] }, { "cell_type": "markdown", @@ -1203,9 +1202,9 @@ "* The attention should be **monotonically increasing**, meaning it never go backwards in the text sequence. 
So the attention should only ever stay on the current character, or move forward to the next character.\n", "* The model should start on the first character in the sequence and end on the last character.\n", "\n", - "These contraints result in the decoder effectively \"reading\" the text character by character or word by word, similar to how humans read aloud.\n", + "These constraints result in the decoder effectively \"reading\" the text character by character or word by word, similar to how humans read aloud.\n", "\n", - "A model may need to be trained for a while before its attention learns to follow these constraints. Before that, the attention may look non-sensical, and the model output will sound unintelligable.\n", + "A model may need to be trained for a while before its attention learns to follow these constraints. Before that, the attention may look non-sensical, and the model output will sound unintelligible.\n", "\n", "Once the models learns the above constraints and starts producing well-behaved attention maps, it is said that the model has **aligned**." ] @@ -1395,20 +1394,26 @@ }, { "cell_type": "markdown", - "source": [ - "## 7.7 Duration Prediction\n" - ], "metadata": { "id": "uya9DJ1SWwEx", "pycharm": { "name": "#%% md\n" } - } + }, + "source": [ + "## 7.7 Duration Prediction\n" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "O6uH8q-BZjko", + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ - "A large weakness of the original Tacotron 2 model is its attention mechanism, which does not enforce the required monotonicity constraint (ie. the decoder must pay attention to each character once in sequential increasing order). As a result, the attention is not robust. It often skips words, repeats words, or encounters catastrophic failures where the output becomes unintelligable.\n", + "A large weakness of the original Tacotron 2 model is its attention mechanism, which does not enforce the required monotonicity constraint (ie. the decoder must pay attention to each character once in sequential increasing order). As a result, the attention is not robust. It often skips words, repeats words, or encounters catastrophic failures where the output becomes unintelligible.\n", "\n", "There are some attention mechanisms such as [forward attention](https://arxiv.org/abs/1807.06736) which try to address this.\n", "\n", @@ -1416,33 +1421,33 @@ "\n", "Replacing the attention mechanism in Tacotron 2 with duration prediction, eg. [Non-Attentive Tacotron](https://arxiv.org/abs/2010.04301), has historically been a common and necessary optimization to make it robust enough for use in enterprise applications. Though it gained visibility in academic literature primarily due to its use in modern transformer based model architectures such as [FastSpeech](https://arxiv.org/abs/1905.09263) and [FastPitch](https://fastpitch.github.io/).\n", "\n", - "The biggest drawback of this approach is that you you need to get the ground truth character duration information. Some methods for doing this are:\n", + "The biggest drawback of this approach is that you need to get the ground truth character duration information. Some methods for doing this are:\n", "\n", "1. The preferred method in NeMo is to Jointly train an [alignment model](https://arxiv.org/abs/2108.10447) that measures the similarity between characters and spectrogram frames.\n", "2. Run forced alignment, such as with the [Montreal Forced Aligner](https://montreal-forced-aligner.readthedocs.io/en/latest/).\n", "3. 
Infer the duration information from the attention map of a teacher model, such as Tacotron 2." - ], - "metadata": { - "id": "O6uH8q-BZjko", - "pycharm": { - "name": "#%% md\n" - } - } + ] }, { "cell_type": "markdown", - "source": [ - "## 7.8 Parallel Models\n" - ], "metadata": { "id": "Z7SfuEJK6176", "pycharm": { "name": "#%% md\n" } - } + }, + "source": [ + "## 7.8 Parallel Models\n" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "XOtPiRajZG2Z", + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "There are some significant weaknesses to auto-regressive systems. Most notably:\n", "\n", @@ -1451,25 +1456,19 @@ "* The user has little control over how the sentence is spoken.\n", "\n", "Using duration prediction enables us to remove the auto-regressive inference and predict every spectrogram frame in parallel. This makes the inference speed up to 100x faster, making it highly preferable for deploying and serving to users." - ], - "metadata": { - "id": "XOtPiRajZG2Z", - "pycharm": { - "name": "#%% md\n" - } - } + ] }, { "cell_type": "markdown", - "source": [ - "### 7.8.1 FastPitch" - ], "metadata": { "id": "HgMfSDW5ZaE4", "pycharm": { "name": "#%% md\n" } - } + }, + "source": [ + "### 7.8.1 FastPitch" + ] }, { "cell_type": "markdown", @@ -1502,15 +1501,15 @@ }, { "cell_type": "markdown", - "source": [ - "Let's run the same inference for FastPitch that we did with Tacotron2. The main difference is loading the FastPitch checkpoint using the `FastPitchModel` class." - ], "metadata": { "id": "UN_SIcPuBcQw", "pycharm": { "name": "#%% md\n" } - } + }, + "source": [ + "Let's run the same inference for FastPitch that we did with Tacotron2. The main difference is loading the FastPitch checkpoint using the `FastPitchModel` class." + ] }, { "cell_type": "code", @@ -1573,34 +1572,42 @@ }, { "cell_type": "markdown", - "source": [ - "### 7.8.2 Drawbacks" - ], "metadata": { "id": "vwD3Xhwhoys0", "pycharm": { "name": "#%% md\n" } - } + }, + "source": [ + "### 7.8.2 Drawbacks" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "3jHNDSGmo5f9", + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "One weakness of parallel models is that without auto-regressive teacher forcing, the model is unable to reliably predict/reconstruct the original utterance. Primarily due to the inputs not fully capturing the unpredictable variability/ambiguity in the possible outputs. The result is that the model learns an average over possible outputs, creating spectrograms that look unrealistically \"smooth\", degrading the audio quality (https://arxiv.org/abs/2202.13066).\n", "\n", "This problem can be partially alleviated by fine-tuning the spectrogram inversion model (described in the next section) directly on the predicted spectrograms.\n", "\n", "To visualize this, let's compare a spectrogram to the corresponding one predicted by FastPitch." 
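One way to make the duration mechanism discussed above concrete, before the spectrogram comparison below, is a minimal length-regulator sketch. This is an illustration of the idea behind FastPitch-style parallel decoding, not NeMo's actual implementation; the shapes and duration values are made up.

```python
# Length-regulator sketch: repeat each encoded token by its predicted
# duration so the text-side sequence matches the number of spectrogram
# frames, after which every frame can be decoded in parallel.
import torch

def regulate_length(encoded: torch.Tensor, durations: torch.Tensor) -> torch.Tensor:
    """encoded: [n_tokens, channels]; durations: [n_tokens] integer frame counts."""
    return torch.repeat_interleave(encoded, durations, dim=0)

token_encodings = torch.randn(5, 384)        # 5 tokens, 384-dim features
durations = torch.tensor([3, 7, 2, 9, 4])    # predicted frames per token
frames = regulate_length(token_encodings, durations)
print(frames.shape)  # torch.Size([25, 384]); 25 == durations.sum()
```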
- ], - "metadata": { - "id": "3jHNDSGmo5f9", - "pycharm": { - "name": "#%% md\n" - } - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "jvHCe1NWplZo", + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], "source": [ "# Compute real spectrogram\n", "audio_path = \"LJ023-0089.wav\"\n", @@ -1619,18 +1626,18 @@ "tokens = fastpitch_model.parse(text, normalize=True)\n", "predicted_spectrogram = fastpitch_model.generate_spectrogram(tokens=tokens)\n", "predicted_spectrogram = predicted_spectrogram.cpu().detach().numpy()[0]" - ], + ] + }, + { + "cell_type": "code", + "execution_count": null, "metadata": { - "id": "jvHCe1NWplZo", + "id": "W_PiDO1Dqezk", "pycharm": { "name": "#%%\n" } }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "code", + "outputs": [], "source": [ "# Compare the spectrograms\n", "imshow(real_spectrogram, origin=\"lower\")\n", @@ -1640,42 +1647,40 @@ "imshow(predicted_spectrogram, origin=\"lower\")\n", "plt.title(\"Predicted Spectrogram\")\n", "plt.show()" - ], - "metadata": { - "id": "W_PiDO1Dqezk", - "pycharm": { - "name": "#%%\n" - } - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", - "source": [ - "As we can see, the predicted spectrogram looks very smooth and well-behaved compared to the ground truth which has a more variation and detail." - ], "metadata": { "id": "_a161gPwreAu", "pycharm": { "name": "#%% md\n" } - } + }, + "source": [ + "As we can see, the predicted spectrogram looks very smooth and well-behaved compared to the ground truth which has a more variation and detail." + ] }, { "cell_type": "markdown", - "source": [ - "## 7.9 Research" - ], "metadata": { "id": "55BH3c8Pre4l", "pycharm": { "name": "#%% md\n" } - } + }, + "source": [ + "## 7.9 Research" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "Yg2JKIqQrlCG", + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "There is ongoing research into improving the audio quality and expressiveness of models like FastPitch, with a few methods that have shown promising results being:\n", "\n", @@ -1683,13 +1688,7 @@ "2. Use [normalizing flows](https://arxiv.org/abs/1908.09257) (sometimes called *glow* models) to directly learn the variability in the training data (eg. [RAD-TTS](https://nv-adlr.github.io/RADTTS)).\n", "3. Use [generative adversarial networks](https://en.wikipedia.org/wiki/Generative_adversarial_network) (GAN) based training to make the predicted spectrograms harder to tell apart from real spectrograms.\n", "4. Avoid the spectrogram entirely by training an end-to-end model that can go directly from text to audio (eg. [VITS](https://arxiv.org/pdf/2106.06103.pdf))." - ], - "metadata": { - "id": "Yg2JKIqQrlCG", - "pycharm": { - "name": "#%% md\n" - } - } + ] }, { "cell_type": "markdown", @@ -1736,15 +1735,15 @@ }, { "cell_type": "markdown", - "source": [ - "Here we will take our audio file, compute its mel spectrogram, and then regenerate the origianl audio from the spectrogram using HiFiGan." - ], "metadata": { "id": "iHijlV2vfAzS", "pycharm": { "name": "#%% md\n" } - } + }, + "source": [ + "Here we will take our audio file, compute its mel spectrogram, and then regenerate the original audio from the spectrogram using HiFiGan." 
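The cells that follow compute mel spectrograms through NeMo's preprocessor. For readers who want the transform's parameters spelled out, here is a rough torchaudio counterpart; the values mirror the 22.05 kHz settings used in this primer, but the exact scaling and normalization differ from NeMo's, so treat it only as a sketch.

```python
# Rough torchaudio counterpart of the mel spectrogram used in this primer.
# Assumes torchaudio is installed; exact scaling differs from NeMo's version.
import torch
import torchaudio

waveform, sr = torchaudio.load("LJ023-0089.wav")   # [channels, samples]
mel_transform = torchaudio.transforms.MelSpectrogram(
    sample_rate=sr,     # 22050 Hz for LJSpeech
    n_fft=1024,         # ~46 ms analysis window
    hop_length=256,     # ~11.6 ms between frames
    n_mels=80,          # number of mel bins (the spectrogram "height")
)
log_mel = torch.log(mel_transform(waveform).clamp(min=1e-5))  # [1, 80, frames]
print(log_mel.shape)
```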
+ ] }, { "cell_type": "code", @@ -1794,18 +1793,24 @@ }, { "cell_type": "markdown", - "source": [ - "## 8.2 Modeling approach" - ], "metadata": { "id": "Euxpd50wieyD", "pycharm": { "name": "#%% md\n" } - } + }, + "source": [ + "## 8.2 Modeling approach" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "9EUITiXCiWkS", + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "Spectrogram inversion is a *sequence-to-sequence* problem.\n", "\n", @@ -1826,25 +1831,19 @@ "Or if your stride is a power of 2 (like the ones we selected) then you can upsample the sequence more effectively using *transposed convolutions* (aka. *deconvolutional layers*).\n", "\n", "Once the input and output sequences are the same length, you can use any number of models to predict the output." - ], - "metadata": { - "id": "9EUITiXCiWkS", - "pycharm": { - "name": "#%% md\n" - } - } + ] }, { "cell_type": "markdown", - "source": [ - "## 8.3 WaveNet\n" - ], "metadata": { "id": "Ri1_RURKjiss", "pycharm": { "name": "#%% md\n" } - } + }, + "source": [ + "## 8.3 WaveNet\n" + ] }, { "cell_type": "markdown", @@ -1871,15 +1870,15 @@ }, { "cell_type": "markdown", - "source": [ - "## 8.4 HiFi-GAN" - ], "metadata": { "id": "PYXZjgEEjndF", "pycharm": { "name": "#%% md\n" } - } + }, + "source": [ + "## 8.4 HiFi-GAN" + ] }, { "cell_type": "markdown", @@ -1942,18 +1941,24 @@ }, { "cell_type": "markdown", - "source": [ - "# 9. Model Evaluation" - ], "metadata": { "id": "aozxSufVJa0l", "pycharm": { "name": "#%% md\n" } - } + }, + "source": [ + "# 9. Model Evaluation" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "I8522HduJmHM", + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "There are no well-established objective metrics for evaluating how good a TTS model is. Rather, quality is usually based on human opinion or perception, commonly measured through surveys.\n", "\n", @@ -1964,28 +1969,28 @@ "There are some metrics which are occasionally used to try and measure audio quality such as [MCD-DTW](https://github.com/MattShannon/mcd), [PESQ](https://en.wikipedia.org/wiki/Perceptual_Evaluation_of_Speech_Quality), and [STOI](https://torchmetrics.readthedocs.io/en/stable/audio/short_time_objective_intelligibility.html). But these have very limited accuracy and usefulness.\n", "\n", "The lack of objective numerical metrics that can be trained on is a large reason as to why many state of the art models rely on GAN based training to get good quality." - ], - "metadata": { - "id": "I8522HduJmHM", - "pycharm": { - "name": "#%% md\n" - } - } + ] }, { "cell_type": "markdown", - "source": [ - "# 10. Additional Resources" - ], "metadata": { "id": "OgtWptQ5tGlq", "pycharm": { "name": "#%% md\n" } - } + }, + "source": [ + "# 10. Additional Resources" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "wtJINtrStHvJ", + "pycharm": { + "name": "#%% md\n" + } + }, "source": [ "To learn more about what TTS technology and models are available in NeMo, please look through our [documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/tts/intro.html#).\n", "\n", @@ -1995,13 +2000,7 @@ "* FastPitch [training](https://github.com/NVIDIA/NeMo/blob/main/tutorials/tts/FastPitch_MixerTTS_Training.ipynb) and [fine-tuning](https://github.com/NVIDIA/NeMo/blob/main/tutorials/tts/FastPitch_Finetuning.ipynb)\n", "\n", "To learn how to deploy and serve your TTS models, visit [Riva](https://docs.nvidia.com/deeplearning/riva/index.html)." 
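Referring back to the modeling-approach discussion of spectrogram inversion: the claim that power-of-two strides let a vocoder upsample efficiently with transposed convolutions can be made concrete with a toy stack whose strides multiply to the 256-sample hop length. The channel widths and kernel sizes below are illustrative, not HiFi-GAN's exact configuration.

```python
# Toy upsampler: strides 8 * 8 * 2 * 2 = 256 stretch each mel frame into
# 256 audio samples, matching the STFT hop length used in this primer.
import torch
import torch.nn as nn

upsampler = nn.Sequential(
    nn.ConvTranspose1d(80, 64, kernel_size=16, stride=8, padding=4),
    nn.ConvTranspose1d(64, 32, kernel_size=16, stride=8, padding=4),
    nn.ConvTranspose1d(32, 16, kernel_size=4, stride=2, padding=1),
    nn.ConvTranspose1d(16, 1, kernel_size=4, stride=2, padding=1),
)

mel = torch.randn(1, 80, 100)    # batch of 1, 80 mel bins, 100 frames
audio = upsampler(mel)
print(audio.shape)               # torch.Size([1, 1, 25600]) == 100 * 256 samples
```

With these kernel/stride/padding choices each layer multiplies the sequence length exactly by its stride; a real vocoder such as HiFi-GAN interleaves residual blocks between the upsampling layers to add fine detail.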
- ], - "metadata": { - "id": "wtJINtrStHvJ", - "pycharm": { - "name": "#%% md\n" - } - } + ] }, { "cell_type": "markdown", @@ -2050,12 +2049,22 @@ "gpuClass": "standard", "kernelspec": { "display_name": "Python 3", + "language": "python", "name": "python3" }, "language_info": { - "name": "python" + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.7" } }, "nbformat": 4, - "nbformat_minor": 0 + "nbformat_minor": 1 } From 7a072ff1a286500c7dcc602f6f28992436779864 Mon Sep 17 00:00:00 2001 From: Matvei Novikov Date: Fri, 28 Oct 2022 21:32:31 +0400 Subject: [PATCH 151/244] Pcla tutorial fixes (#5271) * Fixed typos Signed-off-by: Matvei Novikov * Fixed cell type and tatoeba reference Signed-off-by: Matvei Novikov * Fixed typo Signed-off-by: Matvei Novikov * Fixed branch variable Signed-off-by: Matvei Novikov Signed-off-by: Matvei Novikov --- ...ion_and_Capitalization_Lexical_Audio.ipynb | 23 ++++++++++--------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/tutorials/nlp/Punctuation_and_Capitalization_Lexical_Audio.ipynb b/tutorials/nlp/Punctuation_and_Capitalization_Lexical_Audio.ipynb index 20333de59b38..dc78ddcc7408 100644 --- a/tutorials/nlp/Punctuation_and_Capitalization_Lexical_Audio.ipynb +++ b/tutorials/nlp/Punctuation_and_Capitalization_Lexical_Audio.ipynb @@ -99,7 +99,7 @@ "- whether the word should be capitalized\n", "\n", "\n", - "In some cases lexical only model can't predict punctutation correctly without audio. It is especially hard for conversational speech.\n", + "In some cases lexical only model can't predict punctuation correctly without audio. It is especially hard for conversational speech.\n", "\n", "For example:\n", "\n", @@ -119,7 +119,7 @@ "## Architecture\n", "Punctuation and capitaalization lexical audio model is based on [Multimodal Semi-supervised Learning Framework for Punctuation Prediction in Conversational Speech](https://arxiv.org/pdf/2008.00702.pdf). Model consists of lexical encoder (BERT-like model), acoustic encoder (i.e. Conformer's audio encoder), fusion of lexical and audio features (attention based fusion) and prediction layers.\n", "\n", - "Fusion is needed because encoded text and audio might have different length therfore can't be alligned one-to-one. As model predicts punctuation and capitalization per text token we use cross-attention between encoded lexical and encoded audio input." + "Fusion is needed because encoded text and audio might have different length therefore can't be aligned one-to-one. As model predicts punctuation and capitalization per text token we use cross-attention between encoded lexical and encoded audio input." 
] }, { @@ -279,22 +279,23 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "outputs": [], "source": [ - "## download get_tatoeba_data.py script to download and preprocess the Tatoeba data\n", + "## download get_libritts_data.py script to download and preprocess the LibriTTS data\n", "os.makedirs(WORK_DIR, exist_ok=True)\n", "if not os.path.exists(WORK_DIR + '/get_libritts_data.py'):\n", " print('Downloading get_libritts_data.py...')\n", " wget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/examples/nlp/token_classification/data/get_libritts_data.py', WORK_DIR)\n", "else:\n", " print ('get_libritts_data.py already exists')" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%% md\n" - } - } + ] }, { "cell_type": "code", From 498b61d7bf8458a69da293c745810c2be059d2b3 Mon Sep 17 00:00:00 2001 From: Zhilin Wang Date: Fri, 28 Oct 2022 18:25:51 -0700 Subject: [PATCH 152/244] Fix bug into Dialogue tutorial (#5277) --- examples/nlp/dialogue/dialogue.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/nlp/dialogue/dialogue.py b/examples/nlp/dialogue/dialogue.py index 894937a6b365..0da5ae5717da 100644 --- a/examples/nlp/dialogue/dialogue.py +++ b/examples/nlp/dialogue/dialogue.py @@ -66,7 +66,7 @@ def main(cfg: DictConfig) -> None: logging.info(f'Config: {OmegaConf.to_yaml(cfg)}') try: - strategy = NLPDDPStrategy(no_ddp_communication_hook=True, find_unused_parameters=False,) + strategy = NLPDDPStrategy(no_ddp_communication_hook=True, find_unused_parameters=True,) except (ImportError, ModuleNotFoundError): strategy = None From 80bf342e965d5a7c7469df3ab3c7b3e0460e4fff Mon Sep 17 00:00:00 2001 From: Matvei Novikov Date: Mon, 31 Oct 2022 21:02:27 +0400 Subject: [PATCH 153/244] Typo fix (#5288) Signed-off-by: Matvei Novikov Signed-off-by: Matvei Novikov --- ...ion_and_Capitalization_Lexical_Audio.ipynb | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/tutorials/nlp/Punctuation_and_Capitalization_Lexical_Audio.ipynb b/tutorials/nlp/Punctuation_and_Capitalization_Lexical_Audio.ipynb index dc78ddcc7408..fb544c24e0a2 100644 --- a/tutorials/nlp/Punctuation_and_Capitalization_Lexical_Audio.ipynb +++ b/tutorials/nlp/Punctuation_and_Capitalization_Lexical_Audio.ipynb @@ -117,7 +117,7 @@ }, "source": [ "## Architecture\n", - "Punctuation and capitaalization lexical audio model is based on [Multimodal Semi-supervised Learning Framework for Punctuation Prediction in Conversational Speech](https://arxiv.org/pdf/2008.00702.pdf). Model consists of lexical encoder (BERT-like model), acoustic encoder (i.e. Conformer's audio encoder), fusion of lexical and audio features (attention based fusion) and prediction layers.\n", + "Punctuation and capitalization lexical audio model is based on [Multimodal Semi-supervised Learning Framework for Punctuation Prediction in Conversational Speech](https://arxiv.org/pdf/2008.00702.pdf). Model consists of lexical encoder (BERT-like model), acoustic encoder (i.e. Conformer's audio encoder), fusion of lexical and audio features (attention based fusion) and prediction layers.\n", "\n", "Fusion is needed because encoded text and audio might have different length therefore can't be aligned one-to-one. As model predicts punctuation and capitalization per text token we use cross-attention between encoded lexical and encoded audio input." 
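The attention-based fusion described above can be sketched in a few lines. This is an illustration of the idea rather than NeMo's implementation, and the dimensions are made up: lexical token features act as queries over acoustic frame features, so each token gathers audio evidence even though the two sequences differ in length.

```python
# Cross-attention fusion sketch for the lexical-audio punctuation model.
import torch
import torch.nn as nn

d_model = 256
fusion = nn.MultiheadAttention(embed_dim=d_model, num_heads=4, batch_first=True)

lexical = torch.randn(2, 32, d_model)     # [batch, text tokens, features]
acoustic = torch.randn(2, 120, d_model)   # [batch, audio frames, features]

# Output stays aligned to the 32 text tokens, so per-token punctuation and
# capitalization prediction heads can be applied directly on top of it.
fused, attn_weights = fusion(query=lexical, key=acoustic, value=acoustic)
print(fused.shape)  # torch.Size([2, 32, 256])
```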
] @@ -279,14 +279,7 @@ ] }, { - "cell_type": "code", - "execution_count": null, - "metadata": { - "pycharm": { - "name": "#%% md\n" - } - }, - "outputs": [], + "cell_type": "markdown", "source": [ "## download get_libritts_data.py script to download and preprocess the LibriTTS data\n", "os.makedirs(WORK_DIR, exist_ok=True)\n", @@ -295,7 +288,13 @@ " wget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/examples/nlp/token_classification/data/get_libritts_data.py', WORK_DIR)\n", "else:\n", " print ('get_libritts_data.py already exists')" - ] + ], + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%% md\n" + } + } }, { "cell_type": "code", From 26e3e1d1931a9f34986f73f071f82f446d90c1c5 Mon Sep 17 00:00:00 2001 From: Zhilin Wang Date: Tue, 1 Nov 2022 15:15:54 -0700 Subject: [PATCH 154/244] Fix dialogue tutorial bug (#5297) * set add_pooling_layer=False for huggingface bert model * remove add_pooling_layer=False and set find_unused_parameters=True * set num_prompt_tokens to 0 for huggingface --- .../nlp/models/dialogue/dialogue_gpt_classification_model.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nemo/collections/nlp/models/dialogue/dialogue_gpt_classification_model.py b/nemo/collections/nlp/models/dialogue/dialogue_gpt_classification_model.py index dcf461d5334f..9608a0320bd6 100644 --- a/nemo/collections/nlp/models/dialogue/dialogue_gpt_classification_model.py +++ b/nemo/collections/nlp/models/dialogue/dialogue_gpt_classification_model.py @@ -539,6 +539,7 @@ def generate_candidates(self, labels, template_length, input_ids, attn_masks): for i in generated_tokens ] generated_tokens = torch.cat(generated_tokens, axis=0) + num_prompt_tokens = 0 elif self.cfg.library == "megatron": From 42f6ac9c51090bbe72cef89df52053c5c9473f66 Mon Sep 17 00:00:00 2001 From: fayejf <36722593+fayejf@users.noreply.github.com> Date: Fri, 4 Nov 2022 10:04:41 -0700 Subject: [PATCH 155/244] small bugfix for r1.13.0 (#5310) * typo fix Signed-off-by: fayejf * udpate transcribe Signed-off-by: fayejf Signed-off-by: fayejf --- examples/asr/transcribe_speech.py | 1 + nemo/collections/asr/parts/utils/transcribe_utils.py | 8 ++++---- .../speaker_tasks/Speaker_Diarization_Training.ipynb | 4 ++-- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/examples/asr/transcribe_speech.py b/examples/asr/transcribe_speech.py index b22f86d9ba29..075fadd825f7 100644 --- a/examples/asr/transcribe_speech.py +++ b/examples/asr/transcribe_speech.py @@ -244,6 +244,7 @@ def autocast(): path2manifest=cfg.dataset_manifest, batch_size=cfg.batch_size, num_workers=cfg.num_workers, + return_hypotheses=return_hypotheses, ) else: logging.warning( diff --git a/nemo/collections/asr/parts/utils/transcribe_utils.py b/nemo/collections/asr/parts/utils/transcribe_utils.py index 244231ca92f8..556872ec12f8 100644 --- a/nemo/collections/asr/parts/utils/transcribe_utils.py +++ b/nemo/collections/asr/parts/utils/transcribe_utils.py @@ -74,16 +74,16 @@ def transcribe_partial_audio( lg = logits[idx][: logits_len[idx]] hypotheses.append(lg.cpu().numpy()) else: - current_hypotheses, _ = asr_model._wer.decoding.ctc_decoder_predictions_tensor( - decoder_outputs=greedy_predictions, - decoder_lengths=logits_len, - return_hypotheses=return_hypotheses, + current_hypotheses, all_hyp = asr_model.decoding.ctc_decoder_predictions_tensor( + logits, decoder_lengths=logits_len, return_hypotheses=return_hypotheses, ) if return_hypotheses: # dump log probs per file for idx in range(logits.shape[0]): current_hypotheses[idx].y_sequence = 
logits[idx][: logits_len[idx]] + if current_hypotheses[idx].alignments is None: + current_hypotheses[idx].alignments = current_hypotheses[idx].y_sequence hypotheses += current_hypotheses diff --git a/tutorials/speaker_tasks/Speaker_Diarization_Training.ipynb b/tutorials/speaker_tasks/Speaker_Diarization_Training.ipynb index 77041398edb5..c401591ea319 100644 --- a/tutorials/speaker_tasks/Speaker_Diarization_Training.ipynb +++ b/tutorials/speaker_tasks/Speaker_Diarization_Training.ipynb @@ -197,7 +197,7 @@ "\n", "- Please skip this section and go directly to [Prepare Training data for MSDD](#Prepare-Training-data-for-MSDD) section if you have your own speaker diarization dataset. \n", "\n", - "In this tutorial, we use [NeMo Multispeaker Simulator](https://github.com/NVIDIA/NeMo/blob/main/tutorials/tools/Multispeaker_Simulator.ipynb) and the Librispeech corpus to generate a toy training dataset for demonstration purpose. You can replace the simulated dataset with your own datasets if you have proper speaker annotations (RTTM files) for the dataset. If you do not have access to any speaker diarization datasets, you can use NeMo [NeMo Multispeaker Simulator](https://github.com/NVIDIA/NeMo/blob/main/tutorials/tools/Multispeaker_Simulator.ipynb) by generating a good amount of data samples to meet your needs. \n", + "In this tutorial, we use [NeMo Multispeaker Simulator](https://github.com/NVIDIA/NeMo/blob/main/tutorials/tools/Multispeaker_Simulator.ipynb) and the Librispeech corpus to generate a toy training dataset for demonstration purpose. You can replace the simulated dataset with your own datasets if you have proper speaker annotations (RTTM files) for the dataset. If you do not have access to any speaker diarization datasets, you can use [NeMo Multispeaker Simulator](https://github.com/NVIDIA/NeMo/blob/main/tutorials/tools/Multispeaker_Simulator.ipynb) by generating a good amount of data samples to meet your needs. \n", "\n", "For more details regarding data simulator, please follow the descriptions in [NeMo Multispeaker Simulator](https://github.com/NVIDIA/NeMo/blob/main/tutorials/tools/Multispeaker_Simulator.ipynb) and we will not cover configurations and detailed process of data simulation in this tutorial. 
\n" ] @@ -599,7 +599,7 @@ "\n", "Before we generate a manifest file and RTTM files for training MSDD, you have to determine:\n", "\n", - "- `window`: the windowl length of the base scale (the shortest scale)\n", + "- `window`: the window length of the base scale (the shortest scale)\n", "- `shift`: the hop-length of the base scale (the shortest scale)\n", "- `step_count`: how many decision steps in one data sample\n", "\n", From df959235e07b2de0ee86d3dfccedca1e2bf59e92 Mon Sep 17 00:00:00 2001 From: Igor Gitman Date: Fri, 4 Nov 2022 10:44:25 -0700 Subject: [PATCH 156/244] Add italian model checkpoints (#5316) Signed-off-by: Igor Gitman Signed-off-by: Igor Gitman --- nemo/collections/asr/models/ctc_bpe_models.py | 7 +++++++ nemo/collections/asr/models/rnnt_bpe_models.py | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/nemo/collections/asr/models/ctc_bpe_models.py b/nemo/collections/asr/models/ctc_bpe_models.py index 71421abd9eca..c4c273d6f462 100644 --- a/nemo/collections/asr/models/ctc_bpe_models.py +++ b/nemo/collections/asr/models/ctc_bpe_models.py @@ -635,4 +635,11 @@ def list_available_models(cls) -> Optional[PretrainedModelInfo]: ) results.append(model) + model = PretrainedModelInfo( + pretrained_model_name="stt_it_conformer_ctc_large", + description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_it_conformer_ctc_large", + location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_it_conformer_ctc_large/versions/1.13.0/files/stt_it_conformer_ctc_large.nemo", + ) + results.append(model) + return results diff --git a/nemo/collections/asr/models/rnnt_bpe_models.py b/nemo/collections/asr/models/rnnt_bpe_models.py index 352c64f2ac28..c0a7fe8e7f46 100644 --- a/nemo/collections/asr/models/rnnt_bpe_models.py +++ b/nemo/collections/asr/models/rnnt_bpe_models.py @@ -226,6 +226,13 @@ def list_available_models(cls) -> List[PretrainedModelInfo]: ) results.append(model) + model = PretrainedModelInfo( + pretrained_model_name="stt_it_conformer_transducer_large", + description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_it_conformer_transducer_large", + location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_it_conformer_transducer_large/versions/1.13.0/files/stt_it_conformer_transducer_large.nemo", + ) + results.append(model) + return results def __init__(self, cfg: DictConfig, trainer: Trainer = None): From fbd17adfc3b3d81ed24145039819931debc5597f Mon Sep 17 00:00:00 2001 From: Sasha Meister <117230141+ssh-meister@users.noreply.github.com> Date: Mon, 7 Nov 2022 23:44:14 +0400 Subject: [PATCH 157/244] [STT] Add Ru ASR Conformer-CTC and Conformer-Transducer (#5340) * [STT] Add stt_ru_conformer_ctc_large Signed-off-by: Sasha Meister <117230141+ssh-meister@users.noreply.github.com> * [STT] Add stt_ru_conformer_transducer_large Add stt_ru_conformer_transducer_large Signed-off-by: Sasha Meister <117230141+ssh-meister@users.noreply.github.com> * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Sasha Meister <117230141+ssh-meister@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- nemo/collections/asr/models/ctc_bpe_models.py | 7 +++++++ nemo/collections/asr/models/rnnt_bpe_models.py | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/nemo/collections/asr/models/ctc_bpe_models.py b/nemo/collections/asr/models/ctc_bpe_models.py index 
c4c273d6f462..01e2ee1fed74 100644 --- a/nemo/collections/asr/models/ctc_bpe_models.py +++ b/nemo/collections/asr/models/ctc_bpe_models.py @@ -642,4 +642,11 @@ def list_available_models(cls) -> Optional[PretrainedModelInfo]: ) results.append(model) + model = PretrainedModelInfo( + pretrained_model_name="stt_ru_conformer_ctc_large", + description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_ru_conformer_ctc_large", + location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_ru_conformer_ctc_large/versions/1.13.0/files/stt_ru_conformer_ctc_large.nemo", + ) + results.append(model) + return results diff --git a/nemo/collections/asr/models/rnnt_bpe_models.py b/nemo/collections/asr/models/rnnt_bpe_models.py index c0a7fe8e7f46..81f0a3b49da5 100644 --- a/nemo/collections/asr/models/rnnt_bpe_models.py +++ b/nemo/collections/asr/models/rnnt_bpe_models.py @@ -233,6 +233,13 @@ def list_available_models(cls) -> List[PretrainedModelInfo]: ) results.append(model) + model = PretrainedModelInfo( + pretrained_model_name="stt_ru_conformer_transducer_large", + description="For details about this model, please visit https://ngc.nvidia.com/catalog/models/nvidia:nemo:stt_ru_conformer_transducer_large", + location="https://api.ngc.nvidia.com/v2/models/nvidia/nemo/stt_ru_conformer_transducer_large/versions/1.13.0/files/stt_ru_conformer_transducer_large.nemo", + ) + results.append(model) + return results def __init__(self, cfg: DictConfig, trainer: Trainer = None): From 8184beab3b0fa071747e886c04fe295bacb9044d Mon Sep 17 00:00:00 2001 From: Matvei Novikov Date: Tue, 8 Nov 2022 04:49:05 +0400 Subject: [PATCH 158/244] Pcla tutorial fixes (#5313) * fixes Signed-off-by: Matvei Novikov * fixes Signed-off-by: Matvei Novikov * moved `create_text_and_labels` to token_classification_utils.py Signed-off-by: Matvei Novikov Signed-off-by: Matvei Novikov --- .../data/get_libritts_data.py | 2 +- .../data/get_tatoeba_data.py | 64 +----------------- .../token_classification_utils.py | 66 ++++++++++++++++++- ...ion_and_Capitalization_Lexical_Audio.ipynb | 23 +++---- 4 files changed, 79 insertions(+), 76 deletions(-) diff --git a/examples/nlp/token_classification/data/get_libritts_data.py b/examples/nlp/token_classification/data/get_libritts_data.py index 6b83b1fbaea2..86a5d01eb9dc 100644 --- a/examples/nlp/token_classification/data/get_libritts_data.py +++ b/examples/nlp/token_classification/data/get_libritts_data.py @@ -24,9 +24,9 @@ import subprocess import tarfile -from examples.nlp.token_classification.data.get_tatoeba_data import create_text_and_labels from tqdm import tqdm +from nemo.collections.nlp.data.token_classification.token_classification_utils import create_text_and_labels from nemo.utils import logging URL = { diff --git a/examples/nlp/token_classification/data/get_tatoeba_data.py b/examples/nlp/token_classification/data/get_tatoeba_data.py index 727848b550ab..6a4cd23b249d 100644 --- a/examples/nlp/token_classification/data/get_tatoeba_data.py +++ b/examples/nlp/token_classification/data/get_tatoeba_data.py @@ -17,9 +17,9 @@ import os import random import re -import string import subprocess +from nemo.collections.nlp.data.token_classification.token_classification_utils import create_text_and_labels from nemo.utils import logging URL = {'tatoeba': 'https://downloads.tatoeba.org/exports/sentences.csv'} @@ -120,68 +120,6 @@ def __split_into_train_dev(in_file: str, train_file: str, dev_file: str, percent dev_file.write(' '.join(lines[-dev_size:])) -def 
remove_punctuation(word: str): - """ - Removes all punctuation marks from a word except for ' - that is often a part of word: don't, it's, and so on - """ - all_punct_marks = string.punctuation.replace("'", '') - return re.sub('[' + all_punct_marks + ']', '', word) - - -def create_text_and_labels(output_dir: str, file_path: str, punct_marks: str = ',.?'): - """ - Create datasets for training and evaluation. - - Args: - output_dir: path to the output data directory - file_path: path to file name - punct_marks: supported punctuation marks - - The data will be split into 2 files: text.txt and labels.txt. \ - Each line of the text.txt file contains text sequences, where words\ - are separated with spaces. The labels.txt file contains \ - corresponding labels for each word in text.txt, the labels are \ - separated with spaces. Each line of the files should follow the \ - format: \ - [WORD] [SPACE] [WORD] [SPACE] [WORD] (for text.txt) and \ - [LABEL] [SPACE] [LABEL] [SPACE] [LABEL] (for labels.txt).' - """ - if not os.path.exists(file_path): - raise ValueError(f'{file_path} not found') - - os.makedirs(output_dir, exist_ok=True) - - base_name = os.path.basename(file_path) - labels_file = os.path.join(output_dir, 'labels_' + base_name) - text_file = os.path.join(output_dir, 'text_' + base_name) - - with open(file_path, 'r') as f: - with open(text_file, 'w') as text_f: - with open(labels_file, 'w') as labels_f: - for line in f: - line = line.split() - text = '' - labels = '' - for word in line: - label = word[-1] if word[-1] in punct_marks else 'O' - word = remove_punctuation(word) - if len(word) > 0: - if word[0].isupper(): - label += 'U' - else: - label += 'O' - - word = word.lower() - text += word + ' ' - labels += label + ' ' - - text_f.write(text.strip() + '\n') - labels_f.write(labels.strip() + '\n') - - print(f'{text_file} and {labels_file} created from {file_path}.') - - def __delete_file(file_to_del: str): """ Deletes the file diff --git a/nemo/collections/nlp/data/token_classification/token_classification_utils.py b/nemo/collections/nlp/data/token_classification/token_classification_utils.py index 828ef1180e0b..94acd69d3b11 100644 --- a/nemo/collections/nlp/data/token_classification/token_classification_utils.py +++ b/nemo/collections/nlp/data/token_classification/token_classification_utils.py @@ -14,6 +14,8 @@ import os import pickle +import re +import string from typing import Dict from nemo.collections.nlp.data.data_utils.data_preprocessing import ( @@ -23,7 +25,69 @@ ) from nemo.utils import logging -__all__ = ['get_label_ids'] +__all__ = ['get_label_ids', 'create_text_and_labels'] + + +def remove_punctuation(word: str): + """ + Removes all punctuation marks from a word except for ' + that is often a part of word: don't, it's, and so on + """ + all_punct_marks = string.punctuation.replace("'", '') + return re.sub('[' + all_punct_marks + ']', '', word) + + +def create_text_and_labels(output_dir: str, file_path: str, punct_marks: str = ',.?'): + """ + Create datasets for training and evaluation. + + Args: + output_dir: path to the output data directory + file_path: path to file name + punct_marks: supported punctuation marks + + The data will be split into 2 files: text.txt and labels.txt. \ + Each line of the text.txt file contains text sequences, where words\ + are separated with spaces. The labels.txt file contains \ + corresponding labels for each word in text.txt, the labels are \ + separated with spaces. 
Each line of the files should follow the \ + format: \ + [WORD] [SPACE] [WORD] [SPACE] [WORD] (for text.txt) and \ + [LABEL] [SPACE] [LABEL] [SPACE] [LABEL] (for labels.txt).' + """ + if not os.path.exists(file_path): + raise ValueError(f'{file_path} not found') + + os.makedirs(output_dir, exist_ok=True) + + base_name = os.path.basename(file_path) + labels_file = os.path.join(output_dir, 'labels_' + base_name) + text_file = os.path.join(output_dir, 'text_' + base_name) + + with open(file_path, 'r') as f: + with open(text_file, 'w') as text_f: + with open(labels_file, 'w') as labels_f: + for line in f: + line = line.split() + text = '' + labels = '' + for word in line: + label = word[-1] if word[-1] in punct_marks else 'O' + word = remove_punctuation(word) + if len(word) > 0: + if word[0].isupper(): + label += 'U' + else: + label += 'O' + + word = word.lower() + text += word + ' ' + labels += label + ' ' + + text_f.write(text.strip() + '\n') + labels_f.write(labels.strip() + '\n') + + print(f'{text_file} and {labels_file} created from {file_path}.') def get_label_ids( diff --git a/tutorials/nlp/Punctuation_and_Capitalization_Lexical_Audio.ipynb b/tutorials/nlp/Punctuation_and_Capitalization_Lexical_Audio.ipynb index fb544c24e0a2..4c20cae8af19 100644 --- a/tutorials/nlp/Punctuation_and_Capitalization_Lexical_Audio.ipynb +++ b/tutorials/nlp/Punctuation_and_Capitalization_Lexical_Audio.ipynb @@ -279,22 +279,23 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "outputs": [], "source": [ - "## download get_libritts_data.py script to download and preprocess the LibriTTS data\n", + "# download get_libritts_data.py script to download and preprocess the LibriTTS data\n", "os.makedirs(WORK_DIR, exist_ok=True)\n", "if not os.path.exists(WORK_DIR + '/get_libritts_data.py'):\n", " print('Downloading get_libritts_data.py...')\n", " wget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/examples/nlp/token_classification/data/get_libritts_data.py', WORK_DIR)\n", "else:\n", " print ('get_libritts_data.py already exists')" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%% md\n" - } - } + ] }, { "cell_type": "code", @@ -996,9 +997,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.0" + "version": "3.9.13" } }, "nbformat": 4, "nbformat_minor": 1 -} \ No newline at end of file +} From b791efc93fd103190336ee3b26a4da010e0613d9 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Tue, 8 Nov 2022 03:12:50 -0800 Subject: [PATCH 159/244] a lot of refactoring --- examples/tts/conf/vits.yaml | 143 +++-- examples/tts/conf/vits_44100.yaml | 219 +++++++ examples/tts/vits.py | 5 - .../asr/parts/preprocessing/features.py | 6 +- .../tokenizers/text_to_speech/ipa_lexicon.py | 7 + .../text_to_speech/tts_tokenizers.py | 9 +- nemo/collections/tts/helpers/helpers.py | 169 +++--- nemo/collections/tts/helpers/splines.py | 186 +++++- nemo/collections/tts/models/vits.py | 292 ++++------ nemo/collections/tts/models/vits_test.py | 434 -------------- nemo/collections/tts/modules/vits_modules.py | 539 ++---------------- nemo/collections/tts/torch/data.py | 417 ++++---------- nemo/collections/tts/torch/g2ps.py | 281 --------- 13 files changed, 812 insertions(+), 1895 deletions(-) create mode 100644 examples/tts/conf/vits_44100.yaml delete mode 100644 nemo/collections/tts/models/vits_test.py diff --git a/examples/tts/conf/vits.yaml 
b/examples/tts/conf/vits.yaml index 7debebfb059d..be6d9040a5dc 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -6,61 +6,48 @@ name: VITS -train_dataset: "../datasets/ljspeech_ds/LJSpeech-1.1/train_manifest.json" -# train_dataset: "raid/datasets/tts_data/train_manifest.json" - -validation_datasets: "../datasets/ljspeech_ds/LJSpeech-1.1/val_manifest.json" -# validation_datasets: "raid/datasets/tts_data/val_manifest.json" +train_dataset: ??? +validation_datasets: ??? sup_data_path: null sup_data_types: null -# checkpoint_path: 'checkpoint' -checkpoint_path: null - -phoneme_dict_path: "scripts/tts_dataset_files/cmudict-0.7b_nv22.01" -heteronyms_path: "scripts/tts_dataset_files/heteronyms-030921" +phoneme_dict_path: "scripts/tts_dataset_files/ipa_cmudict-0.7b_nv22.08.txt" +heteronyms_path: "scripts/tts_dataset_files/heteronyms-052722" whitelist_path: "nemo_text_processing/text_normalization/en/data/whitelist/lj_speech.tsv" +# Default values from librosa.pyin +pitch_fmin: 65.40639132514966 +pitch_fmax: 2093.004522404789 + +sample_rate: 22050 +n_mel_channels: 80 +n_window_size: 1024 +n_window_stride: 256 +n_fft: 1024 +lowfreq: 0 +highfreq: null +window: hann model: - pitch_fmin: 65.40639132514966 - pitch_fmax: 2093.004522404789 - - sample_rate: 22050 - n_mel_channels: 80 - n_window_size: 1024 - n_window_stride: 256 - n_fft: 1024 - lowfreq: 0 - highfreq: null - window: hann - - splice_length: 64 - lr: 2e-4 - n_speakers: 1 - symbols_embedding_dim: 384 - max_token_duration: 75 - pitch_embedding_kernel_size: 3 - - seed: 1234 - betas: [0.8,0.99] - eps: 1e-9 - lr_decay: 0.999875 + pitch_fmin: ${pitch_fmin} + pitch_fmax: ${pitch_fmax} + + sample_rate: ${sample_rate} + n_mel_channels: ${n_mel_channels} + n_window_size: ${n_window_size} + n_window_stride: ${n_window_stride} + n_fft: ${n_fft} + lowfreq: ${lowfreq} + highfreq: ${highfreq} + window: ${window} + mel_fmin: 0.0 + mel_fmax: null + + n_speakers: 0 segment_size: 8192 - init_lr_ratio: 1 - warmup_epochs: 0 c_mel: 45 c_kl: 1. 
- inter_channels: 192 - hidden_channels: 192 - filter_channels: 768 - n_heads: 2 - p_dropout: 0.1 - n_layers_q: 3 - n_layers: 6 use_spectral_norm: false - mel_fmin: 0.0 - mel_fmax: null text_normalizer: _target_: nemo_text_processing.text_normalization.normalize.Normalizer @@ -74,26 +61,20 @@ model: punct_post_process: true text_tokenizer: - _target_: nemo.collections.tts.torch.tts_tokenizers.IPAPhonemesTokenizer + _target_: nemo.collections.common.tokenizers.text_to_speech.tts_tokenizers.IPATokenizer punct: true - stresses: true - chars: true apostrophe: true pad_with_space: false + sep_with_space: true g2p: - _target_: nemo.collections.tts.torch.g2ps.IPAG2p - strip: true - # text_tokenizer: - # _target_: nemo.collections.tts.torch.tts_tokenizers.EnglishPhonemesTokenizer - # punct: true - # stresses: true - # chars: true - # apostrophe: true - # pad_with_space: true - # g2p: - # _target_: nemo.collections.tts.torch.g2ps.EnglishG2p - # phoneme_dict: ${phoneme_dict_path} - # heteronyms: ${heteronyms_path} + _target_: nemo_text_processing.g2p.modules.IPAG2P + phoneme_dict: ${phoneme_dict_path} + heteronyms: ${heteronyms_path} + phoneme_probability: 0.8 + # Relies on the heteronyms list for anything that needs to be disambiguated + ignore_ambiguous_words: false + use_chars: true + use_stresses: true train_ds: dataset: @@ -115,13 +96,8 @@ model: trim: False pitch_fmin: ${model.pitch_fmin} pitch_fmax: ${model.pitch_fmax} - add_blank: true - dataloader_params: - # drop_last: false - # shuffle: true - # batch_size: 32 num_workers: 8 pin_memory: false @@ -131,7 +107,6 @@ model: num_replicas: ${trainer.devices} shuffle: true - validation_ds: dataset: _target_: "nemo.collections.tts.torch.data.TTSDataset" @@ -152,7 +127,6 @@ model: trim: False pitch_fmin: ${model.pitch_fmin} pitch_fmax: ${model.pitch_fmax} - add_blank: true dataloader_params: drop_last: false @@ -184,17 +158,35 @@ model: nb_augmentation_prob : 0 mag_power: 1.0 exact_pad: true - use_grads: false - - generator: - _target_: nemo.collections.tts.modules.vits_modules.Generator + use_grads: true + + synthesizer: + _target_: nemo.collections.tts.modules.vits_modules.SynthesizerTrn + inter_channels: 192 + hidden_channels: 192 + filter_channels: 768 + n_heads: 2 + n_layers: 6 + kernel_size: 3 + p_dropout: 0.1 resblock: "1" resblock_kernel_sizes: [3,7,11] resblock_dilation_sizes: [[1,3,5], [1,3,5], [1,3,5]] upsample_rates: [8,8,2,2] upsample_initial_channel: 512 upsample_kernel_sizes: [16,16,4,4] - initial_channel: 384 # initial_input_size: 384 + n_speakers: ${model.n_speakers} + gin_channels: 256 # for multi-speaker + + optim: + _target_: torch.optim.AdamW + lr: 2e-4 + betas: [0.9, 0.99] + eps: 1e-9 + + sched: + name: ExponentialLR + lr_decay: 0.999875 trainer: num_nodes: 1 @@ -205,16 +197,15 @@ trainer: # amp_backend: 'apex' # amp_level: 'O2' # benchmark: true - max_epochs: 1000000 + max_epochs: -1 accumulate_grad_batches: 1 - # gradient_clip_val: 1000.0 enable_checkpointing: false # Provided by exp_manager logger: false # Provided by exp_manager log_every_n_steps: 50 check_val_every_n_epoch: 1 exp_manager: - exp_dir: ../exps/vits_orig_g2p + exp_dir: ../exps/vits_fp16_local name: ${name} create_tensorboard_logger: false create_checkpoint_callback: true @@ -223,7 +214,7 @@ exp_manager: mode: min create_wandb_logger: true wandb_logger_kwargs: - name: vits_orig_g2p + name: vits_fp16_local project: ${name} entity: nvidia resume: "allow" diff --git a/examples/tts/conf/vits_44100.yaml b/examples/tts/conf/vits_44100.yaml new file mode 100644 index 
000000000000..5646a608ed02 --- /dev/null +++ b/examples/tts/conf/vits_44100.yaml @@ -0,0 +1,219 @@ +# This config contains the default values for training VITS model on LJSpeech dataset. +# If you want to train model on other dataset, you can change config values according to your dataset. +# Most dataset-specific arguments are in the head of the config file, see below. + +# TODO: remove unnecessary arguments, refactoring + +name: VITS + +train_dataset: ??? +validation_datasets: ??? +sup_data_path: ??? +sup_data_types: [speaker_id] + +pitch_fmin: 65.40639132514966 +pitch_fmax: 2093.004522404789 + +sample_rate: 44100 +n_mel_channels: 80 +n_window_size: 2048 +n_window_stride: 512 +n_fft: 2048 +lowfreq: 0 +highfreq: null +window: hann + +phoneme_dict_path: "scripts/tts_dataset_files/ipa_cmudict-0.7b_nv22.08.txt" +heteronyms_path: "scripts/tts_dataset_files/heteronyms-052722" +whitelist_path: "nemo_text_processing/text_normalization/en/data/whitelist/lj_speech.tsv" + +model: + n_speakers: 13000 + segment_size: 16384 + c_mel: 45 + c_kl: 1. + use_spectral_norm: false + + pitch_fmin: ${pitch_fmin} + pitch_fmax: ${pitch_fmax} + + sample_rate: ${sample_rate} + n_mel_channels: ${n_mel_channels} + n_window_size: ${n_window_size} + n_window_stride: ${n_window_stride} + n_fft: ${n_fft} + lowfreq: ${lowfreq} + highfreq: ${highfreq} + window: ${window} + + text_normalizer: + _target_: nemo_text_processing.text_normalization.normalize.Normalizer + lang: en + input_case: cased + whitelist: ${whitelist_path} + + text_normalizer_call_kwargs: + verbose: false + punct_pre_process: true + punct_post_process: true + + text_tokenizer: + _target_: nemo.collections.common.tokenizers.text_to_speech.tts_tokenizers.IPATokenizer + punct: true + apostrophe: true + pad_with_space: false + sep_with_space: false + g2p: + _target_: nemo_text_processing.g2p.modules.IPAG2P + phoneme_dict: ${phoneme_dict_path} + heteronyms: ${heteronyms_path} + phoneme_probability: 0.8 + # Relies on the heteronyms list for anything that needs to be disambiguated + ignore_ambiguous_words: false + use_chars: true + use_stresses: true + + train_ds: + dataset: + _target_: "nemo.collections.tts.torch.data.TTSDataset" + manifest_filepath: ${train_dataset} + sample_rate: ${model.sample_rate} + sup_data_path: ${sup_data_path} + sup_data_types: ${sup_data_types} + n_fft: ${model.n_fft} + win_length: ${model.n_window_size} + hop_length: ${model.n_window_stride} + window: ${model.window} + n_mels: ${model.n_mel_channels} + lowfreq: ${model.lowfreq} + highfreq: ${model.highfreq} + max_duration: null + min_duration: 0.1 + ignore_file: null + trim: False + pitch_fmin: ${model.pitch_fmin} + pitch_fmax: ${model.pitch_fmax} + + dataloader_params: + num_workers: 8 + pin_memory: false + + batch_sampler: + batch_size: 2 + boundaries: [32,300,400,500,600,700,800,900,1000] + num_replicas: ${trainer.devices} + shuffle: true + + validation_ds: + dataset: + _target_: "nemo.collections.tts.torch.data.TTSDataset" + manifest_filepath: ${validation_datasets} + sample_rate: ${model.sample_rate} + sup_data_path: ${sup_data_path} + sup_data_types: ${sup_data_types} + n_fft: ${model.n_fft} + win_length: ${model.n_window_size} + hop_length: ${model.n_window_stride} + window: ${model.window} + n_mels: ${model.n_mel_channels} + lowfreq: ${model.lowfreq} + highfreq: ${model.highfreq} + max_duration: null + min_duration: 0.1 + ignore_file: null + trim: False + pitch_fmin: ${model.pitch_fmin} + pitch_fmax: ${model.pitch_fmax} + + dataloader_params: + drop_last: false + shuffle: 
false + batch_size: 2 + num_workers: 4 + pin_memory: false + + preprocessor: + _target_: nemo.collections.asr.parts.preprocessing.features.FilterbankFeatures + nfilt: ${model.n_mel_channels} + highfreq: ${model.highfreq} + log: true + log_zero_guard_type: clamp + log_zero_guard_value: 1e-05 + lowfreq: ${model.lowfreq} + n_fft: ${model.n_fft} + n_window_size: ${model.n_window_size} + n_window_stride: ${model.n_window_stride} + pad_to: 1 + pad_value: 0 + sample_rate: ${model.sample_rate} + window: ${model.window} + normalize: null + preemph: null + dither: 0.0 + frame_splicing: 1 + stft_conv: false + nb_augmentation_prob : 0 + mag_power: 1.0 + exact_pad: true + use_grads: true + + synthesizer: + _target_: nemo.collections.tts.modules.vits_modules.SynthesizerTrn + inter_channels: 192 + hidden_channels: 192 + filter_channels: 768 + n_heads: 2 + n_layers: 6 + kernel_size: 3 + p_dropout: 0.1 + resblock: "1" + resblock_kernel_sizes: [3,7,11] + resblock_dilation_sizes: [[1,3,5], [1,3,5], [1,3,5]] + upsample_rates: [8,8,4,2] + upsample_initial_channel: 512 + upsample_kernel_sizes: [16,16,4,4] + n_speakers: ${model.n_speakers} + gin_channels: 256 # for multi-speaker + + optim: + _target_: torch.optim.AdamW + lr: 2e-4 + betas: [0.9, 0.99] + eps: 1e-9 + + sched: + name: ExponentialLR + lr_decay: 0.999875 + +trainer: + num_nodes: 1 + devices: 2 + accelerator: gpu + strategy: ddp + precision: 32 + # amp_backend: 'apex' + # amp_level: 'O2' + # benchmark: true + max_epochs: -1 + accumulate_grad_batches: 1 + enable_checkpointing: false # Provided by exp_manager + logger: false # Provided by exp_manager + log_every_n_steps: 50 + check_val_every_n_epoch: 1 + +exp_manager: + exp_dir: ../exps/vits_hifitts_fp16_local + name: ${name} + create_tensorboard_logger: false + create_checkpoint_callback: true + checkpoint_callback_params: + monitor: loss_gen_all + mode: min + create_wandb_logger: true + wandb_logger_kwargs: + name: vits_hifitts_fp16_local + project: ${name} + entity: nvidia + resume: "allow" + resume_if_exists: false + resume_ignore_no_checkpoint: false diff --git a/examples/tts/vits.py b/examples/tts/vits.py index 45be264faecf..ad6018b1017e 100644 --- a/examples/tts/vits.py +++ b/examples/tts/vits.py @@ -23,11 +23,6 @@ @hydra_runner(config_path="conf", config_name="vits") def main(cfg): - # plugins = [] - # if cfg.trainer.precision in [16, 'bf16']: - # scaler = GradScaler(enabled=True) - # plugins.append(NativeMixedPrecisionPlugin(precision=cfg.trainer.precision, device='cuda', scaler=scaler)) - trainer = pl.Trainer(replace_sampler_ddp=False, **cfg.trainer) # trainer = pl.Trainer(**cfg.trainer) exp_manager(trainer, cfg.get("exp_manager", None)) diff --git a/nemo/collections/asr/parts/preprocessing/features.py b/nemo/collections/asr/parts/preprocessing/features.py index 5684181f8637..dea1849c622d 100644 --- a/nemo/collections/asr/parts/preprocessing/features.py +++ b/nemo/collections/asr/parts/preprocessing/features.py @@ -331,7 +331,7 @@ def get_seq_len(self, seq_len): def filter_banks(self): return self.fb - def forward(self, x, seq_len): + def forward(self, x, seq_len, linear_spec=False): seq_len = self.get_seq_len(seq_len.float()) if self.stft_pad_amount is not None: @@ -367,6 +367,10 @@ def forward(self, x, seq_len): if self.mag_power != 1.0: x = x.pow(self.mag_power) + # return plain spectrogram if required + if linear_spec: + return x, seq_len + # dot with filterbank energies x = torch.matmul(self.fb.to(x.dtype), x) # log features if required diff --git 
a/nemo/collections/common/tokenizers/text_to_speech/ipa_lexicon.py b/nemo/collections/common/tokenizers/text_to_speech/ipa_lexicon.py
index 48745a0da35c..32ff5dec6d2d 100644
--- a/nemo/collections/common/tokenizers/text_to_speech/ipa_lexicon.py
+++ b/nemo/collections/common/tokenizers/text_to_speech/ipa_lexicon.py
@@ -20,6 +20,13 @@
     ')', '[', ']', '{', '}',
 )
 
+VITS_PUNCTUATION = (
+    ',', '.', '!', '?', '-',
+    ':', ';', '"', '«', '»',
+    '“', '”', '¡', '¿', '—',
+    '…',
+)
+
 GRAPHEME_CHARACTER_SETS = {
     "en-US": (
         'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J',
diff --git a/nemo/collections/common/tokenizers/text_to_speech/tts_tokenizers.py b/nemo/collections/common/tokenizers/text_to_speech/tts_tokenizers.py
index 81fd5c46a648..c669cbea50f2 100644
--- a/nemo/collections/common/tokenizers/text_to_speech/tts_tokenizers.py
+++ b/nemo/collections/common/tokenizers/text_to_speech/tts_tokenizers.py
@@ -26,6 +26,7 @@
 )
 
 from nemo.collections.common.tokenizers.text_to_speech.ipa_lexicon import get_ipa_punctuation_list
+from nemo.collections.tts.helpers.helpers import intersperse
 from nemo.utils import logging
 from nemo.utils.decorators import experimental
 
@@ -519,6 +520,7 @@ def __init__(
         oov=BaseTokenizer.OOV,
         sep='|',  # To be able to distinguish between symbols
         add_blank_at=None,
+        sep_with_space=False,
         pad_with_space=False,
         text_preprocessing_func=lambda text: english_text_preprocessing(text, lower=False),
     ):
@@ -534,6 +536,7 @@
             sep: Separation token as string.
             add_blank_at: Add blank to labels in the specified order ("last") or after tokens (any non None),
                 if None then no blank in labels.
+            sep_with_space: Whether to separate all tokens with spaces (used for VITS)
             pad_with_space: Whether to pad text with spaces at the beginning and at the end or not.
             text_preprocessing_func: Text preprocessing function for correct execution of the tokenizer.
                 Basically, it replaces all non-unicode characters with unicode ones.
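The next hunk wires sep_with_space into encode_from_g2p via the intersperse helper imported above (the same helper is added to nemo/collections/tts/helpers/helpers.py later in this patch). A minimal usage sketch, with illustrative token ids:

    def intersperse(lst, item):
        # place `item` between every pair of tokens and at both ends,
        # so the result has length 2 * len(lst) + 1
        result = [item] * (len(lst) * 2 + 1)
        result[1::2] = lst
        return result

    tokens = [3, 7, 11]
    assert intersperse(tokens, 0) == [0, 3, 0, 7, 0, 11, 0]

This follows the original VITS recipe of interleaving a blank/space token with all input tokens, replacing the add_blank: true flag removed from the dataset configs above.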
@@ -578,6 +581,7 @@ def __init__( self.punct = punct self.pad_with_space = pad_with_space + self.sep_with_space = sep_with_space self.text_preprocessing_func = text_preprocessing_func self.g2p = g2p @@ -621,7 +625,10 @@ def encode_from_g2p(self, g2p_text: List[str], raw_text: Optional[str] = None): while ps[-1] == space: ps.pop() - if self.pad_with_space: + if self.sep_with_space: + ps = intersperse(ps, space) + + if self.pad_with_space and not self.sep_with_space: ps = [space] + ps + [space] # Token index lookups diff --git a/nemo/collections/tts/helpers/helpers.py b/nemo/collections/tts/helpers/helpers.py index 5572acbd44a9..e79cfe017d35 100644 --- a/nemo/collections/tts/helpers/helpers.py +++ b/nemo/collections/tts/helpers/helpers.py @@ -141,7 +141,6 @@ def get_mask_from_lengths(lengths, max_len: Optional[int] = None): mask = (ids < lengths.unsqueeze(1)).bool() return mask - @jit(nopython=True) def mas(attn_map, width=1): # assumes mel x text @@ -561,109 +560,69 @@ def split_view(tensor, split_size: int, dim: int = 0): new_shape = cur_shape[:dim] + (tensor.shape[dim] // split_size, split_size) + cur_shape[dim + 1 :] return tensor.reshape(*new_shape) -class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler): +def slice_segments(x, ids_str, segment_size=4): + ret = torch.zeros_like(x[:, :, :segment_size]) + for i in range(x.size(0)): + idx_str = ids_str[i] + idx_end = idx_str + segment_size + x_i = x[i] + if idx_end >= x.size(2): + # pad the sample if it is shorter than the segment size + x_i = torch.nn.functional.pad(x_i, (0, (idx_end + 1) - x.size(2))) + ret[i] = x_i[:, idx_str:idx_end] + return ret + + +def rand_slice_segments(x, x_lengths=None, segment_size=4): + b, d, t = x.size() + if x_lengths is None: + x_lengths = t + ids_str_max = x_lengths - segment_size + 1 + ids_str_max = ids_str_max.to(device=x.device) + ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) + + ret = slice_segments(x, ids_str, segment_size) + + return ret, ids_str + +def clip_grad_value_(parameters, clip_value, norm_type=2): + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + parameters = list(filter(lambda p: p.grad is not None, parameters)) + norm_type = float(norm_type) + if clip_value is not None: + clip_value = float(clip_value) + + total_norm = 0 + for p in parameters: + param_norm = p.grad.data.norm(norm_type) + total_norm += param_norm.item() ** norm_type + if clip_value is not None: + p.grad.data.clamp_(min=-clip_value, max=clip_value) + total_norm = total_norm ** (1. / norm_type) + return total_norm + +def intersperse(lst, item): + result = [item] * (len(lst) * 2 + 1) + result[1::2] = lst + return result + +def convert_pad_shape(pad_shape): + l = pad_shape[::-1] + pad_shape = [item for sublist in l for item in sublist] + return pad_shape + +def generate_path(duration, mask): """ - Maintain similar input lengths in a batch. - Length groups are specified by boundaries. - Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}. - - It removes samples which are not included in the boundaries. - Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded. 
+ duration: [b, 1, t_x] + mask: [b, 1, t_y, t_x] """ - def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True): - super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle) - self.lengths = dataset.lengths - self.batch_size = batch_size - self.boundaries = boundaries - - self.buckets, self.num_samples_per_bucket = self._create_buckets() - self.total_size = sum(self.num_samples_per_bucket) - self.num_samples = self.total_size // self.num_replicas - - def _create_buckets(self): - buckets = [[] for _ in range(len(self.boundaries) - 1)] - for i in range(len(self.lengths)): - length = self.lengths[i] - idx_bucket = self._bisect(length) - if idx_bucket != -1: - buckets[idx_bucket].append(i) - - for i in range(len(buckets) - 1, 0, -1): - if len(buckets[i]) == 0: - buckets.pop(i) - self.boundaries.pop(i+1) - - num_samples_per_bucket = [] - for i in range(len(buckets)): - len_bucket = len(buckets[i]) - total_batch_size = self.num_replicas * self.batch_size - rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size - num_samples_per_bucket.append(len_bucket + rem) - return buckets, num_samples_per_bucket - - def __iter__(self): - # deterministically shuffle based on epoch - g = torch.Generator() - g.manual_seed(self.epoch) - indices = [] - if self.shuffle: - for bucket in self.buckets: - indices.append(torch.randperm(len(bucket), generator=g).tolist()) - else: - for bucket in self.buckets: - indices.append(list(range(len(bucket)))) - - batches = [] - for i in range(len(self.buckets)): - bucket = self.buckets[i] - len_bucket = len(bucket) - ids_bucket = indices[i] - num_samples_bucket = self.num_samples_per_bucket[i] - - # add extra samples to make it evenly divisible - rem = num_samples_bucket - len_bucket - ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)] - - # subsample - ids_bucket = ids_bucket[self.rank::self.num_replicas] - - # batching - for j in range(len(ids_bucket) // self.batch_size): - batch = [bucket[idx] for idx in ids_bucket[j*self.batch_size:(j+1)*self.batch_size]] - batches.append(batch) - - if self.shuffle: - batch_ids = torch.randperm(len(batches), generator=g).tolist() - batches = [batches[i] for i in batch_ids] - self.batches = batches - - assert len(self.batches) * self.batch_size == self.num_samples - return iter(self.batches) - - def _bisect(self, x, lo=0, hi=None): - if hi is None: - hi = len(self.boundaries) - 1 - - if hi > lo: - mid = (hi + lo) // 2 - if self.boundaries[mid] < x and x <= self.boundaries[mid+1]: - return mid - elif x <= self.boundaries[mid]: - return self._bisect(x, lo, mid) - else: - return self._bisect(x, mid + 1, hi) - else: - return -1 - - def __len__(self): - return self.num_samples // self.batch_size - - def set_epoch(self, epoch: int) -> None: - """ - Sets the epoch for this sampler. When :attr:`shuffle=True`, this ensures all replicas - use a different random ordering for each epoch. Otherwise, the next iteration of this - sampler will yield the same ordering. - Args: - epoch (int): Epoch number. 
- """ - self.epoch = epoch \ No newline at end of file + b, _, t_y, t_x = mask.shape + cum_duration = torch.cumsum(duration, -1) + + cum_duration_flat = cum_duration.view(b * t_x) + path = get_mask_from_lengths(cum_duration_flat, t_y).to(mask.dtype) + path = path.view(b, t_x, t_y) + path = path - torch.nn.functional.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] + path = path.unsqueeze(1).transpose(2,3) * mask + return path \ No newline at end of file diff --git a/nemo/collections/tts/helpers/splines.py b/nemo/collections/tts/helpers/splines.py index e697f0671200..b5fe68bb3d93 100644 --- a/nemo/collections/tts/helpers/splines.py +++ b/nemo/collections/tts/helpers/splines.py @@ -18,7 +18,7 @@ # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - +import numpy as np import torch import torch.nn.functional as F @@ -288,3 +288,187 @@ def piecewise_quadratic_transform(x, w_tilde, v_tilde, inverse=False): # make sure it falls into [0,1) inv = inv.clamp(min=torch.finfo(c.dtype).eps, max=1.0 - torch.finfo(inv.dtype).eps) return inv, None + +def piecewise_rational_quadratic_transform(inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + tails=None, + tail_bound=1., + min_bin_width=1e-3, + min_bin_height=1e-3, + min_derivative=1e-3): + + if tails is None: + spline_fn = rational_quadratic_spline + spline_kwargs = {} + else: + spline_fn = unconstrained_rational_quadratic_spline + spline_kwargs = { + 'tails': tails, + 'tail_bound': tail_bound + } + + outputs, logabsdet = spline_fn( + inputs=inputs, + unnormalized_widths=unnormalized_widths, + unnormalized_heights=unnormalized_heights, + unnormalized_derivatives=unnormalized_derivatives, + inverse=inverse, + min_bin_width=min_bin_width, + min_bin_height=min_bin_height, + min_derivative=min_derivative, + **spline_kwargs + ) + return outputs, logabsdet + + +def searchsorted(bin_locations, inputs, eps=1e-6): + bin_locations[..., -1] += eps + return torch.sum( + inputs[..., None] >= bin_locations, + dim=-1 + ) - 1 + +def unconstrained_rational_quadratic_spline(inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + tails='linear', + tail_bound=1., + min_bin_width=1e-3, + min_bin_height=1e-3, + min_derivative=1e-3): + inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) + outside_interval_mask = ~inside_interval_mask + + outputs = torch.zeros_like(inputs) + logabsdet = torch.zeros_like(inputs) + + if tails == 'linear': + unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) + constant = np.log(np.exp(1 - min_derivative) - 1) + unnormalized_derivatives[..., 0] = constant + unnormalized_derivatives[..., -1] = constant + + outputs[outside_interval_mask] = inputs[outside_interval_mask] + logabsdet[outside_interval_mask] = 0 + else: + raise RuntimeError('{} tails are not implemented.'.format(tails)) + + outputs[inside_interval_mask], logabsdet[inside_interval_mask] = 
rational_quadratic_spline( + inputs=inputs[inside_interval_mask], + unnormalized_widths=unnormalized_widths[inside_interval_mask, :], + unnormalized_heights=unnormalized_heights[inside_interval_mask, :], + unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], + inverse=inverse, + left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, + min_bin_width=min_bin_width, + min_bin_height=min_bin_height, + min_derivative=min_derivative + ) + + return outputs, logabsdet + +def rational_quadratic_spline(inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + left=0., right=1., bottom=0., top=1., + min_bin_width=1e-3, + min_bin_height=1e-3, + min_derivative=1e-3): + + + if torch.min(inputs) < left or torch.max(inputs) > right: + raise ValueError('Input to a transform is not within its domain') + + num_bins = unnormalized_widths.shape[-1] + + if min_bin_width * num_bins > 1.0: + raise ValueError('Minimal bin width too large for the number of bins') + if min_bin_height * num_bins > 1.0: + raise ValueError('Minimal bin height too large for the number of bins') + + widths = F.softmax(unnormalized_widths, dim=-1) + widths = min_bin_width + (1 - min_bin_width * num_bins) * widths + cumwidths = torch.cumsum(widths, dim=-1) + cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) + cumwidths = (right - left) * cumwidths + left + cumwidths[..., 0] = left + cumwidths[..., -1] = right + widths = cumwidths[..., 1:] - cumwidths[..., :-1] + + derivatives = min_derivative + F.softplus(unnormalized_derivatives) + + heights = F.softmax(unnormalized_heights, dim=-1) + heights = min_bin_height + (1 - min_bin_height * num_bins) * heights + cumheights = torch.cumsum(heights, dim=-1) + cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) + cumheights = (top - bottom) * cumheights + bottom + cumheights[..., 0] = bottom + cumheights[..., -1] = top + heights = cumheights[..., 1:] - cumheights[..., :-1] + + if inverse: + bin_idx = searchsorted(cumheights, inputs)[..., None] + else: + bin_idx = searchsorted(cumwidths, inputs)[..., None] + + input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] + input_bin_widths = widths.gather(-1, bin_idx)[..., 0] + + input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] + delta = heights / widths + input_delta = delta.gather(-1, bin_idx)[..., 0] + + input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] + input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] + + input_heights = heights.gather(-1, bin_idx)[..., 0] + + if inverse: + a = (((inputs - input_cumheights) * (input_derivatives + + input_derivatives_plus_one + - 2 * input_delta) + + input_heights * (input_delta - input_derivatives))) + b = (input_heights * input_derivatives + - (inputs - input_cumheights) * (input_derivatives + + input_derivatives_plus_one + - 2 * input_delta)) + c = - input_delta * (inputs - input_cumheights) + + discriminant = b.pow(2) - 4 * a * c + assert (discriminant >= 0).all() + + root = (2 * c) / (-b - torch.sqrt(discriminant)) + outputs = root * input_bin_widths + input_cumwidths + + theta_one_minus_theta = root * (1 - root) + denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) + * theta_one_minus_theta) + derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) + + 2 * input_delta * theta_one_minus_theta + + input_derivatives * (1 - root).pow(2)) + logabsdet = 
torch.log(derivative_numerator) - 2 * torch.log(denominator) + + return outputs, -logabsdet + else: + theta = (inputs - input_cumwidths) / input_bin_widths + theta_one_minus_theta = theta * (1 - theta) + + numerator = input_heights * (input_delta * theta.pow(2) + + input_derivatives * theta_one_minus_theta) + denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) + * theta_one_minus_theta) + outputs = input_cumheights + numerator / denominator + + derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) + + 2 * input_delta * theta_one_minus_theta + + input_derivatives * (1 - theta).pow(2)) + logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) + + return outputs, logabsdet \ No newline at end of file diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 1eb955e2deb2..545ed7b92306 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -12,21 +12,23 @@ # See the License for the specific language governing permissions and # limitations under the License. -from nemo.core import typecheck - -# typecheck.set_typecheck_enabled(False) +import contextlib import omegaconf import torch import wandb from hydra.utils import instantiate -from omegaconf import DictConfig +from omegaconf import DictConfig, OmegaConf from pytorch_lightning import Trainer from pytorch_lightning.loggers import WandbLogger -from torch.cuda.amp import autocast, GradScaler +from torch.cuda.amp import autocast from torch.nn import functional as F -from nemo.collections.tts.helpers.helpers import plot_spectrogram_to_numpy, DistributedBucketSampler +from nemo.collections.tts.helpers.helpers import ( + slice_segments, + clip_grad_value_, + plot_spectrogram_to_numpy, +) from nemo.collections.tts.losses.vits_losses import ( KlLoss, FeatureMatchingLoss, @@ -34,18 +36,18 @@ GeneratorLoss ) from nemo.collections.tts.models.base import TextToWaveform -from nemo.collections.tts.modules.vits_modules import ( - MultiPeriodDiscriminator, - SynthesizerTrn, - audio_to_mel_torch, - clip_grad_value_, - slice_segments, - spec_to_mel_torch, -) +from nemo.collections.tts.modules.vits_modules import *# MultiPeriodDiscriminator +from nemo.collections.tts.torch.data import DistributedBucketSampler +from nemo.collections.tts.torch.tts_data_types import SpeakerID from nemo.core.classes.common import PretrainedModelInfo -from nemo.core.optim.lr_scheduler import CosineAnnealing from nemo.utils import logging, model_utils +HAVE_WANDB = True +try: + import wandb +except ModuleNotFoundError: + HAVE_WANDB = False + class VitsModel(TextToWaveform): def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): # Convert to Hydra 1.0 compatible DictConfig @@ -66,67 +68,26 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): num_tokens = len(self.tokenizer.tokens) self.tokenizer_pad = self.tokenizer.pad - self.tokenizer_unk = self.tokenizer.oov - - # self.scaler = GradScaler() super().__init__(cfg=cfg, trainer=trainer) - self.audio_to_melspec_precessor = instantiate(cfg.preprocessor, highfreq=cfg.train_ds.dataset.highfreq) + self.audio_to_melspec_processor = instantiate(cfg.preprocessor, highfreq=cfg.train_ds.dataset.highfreq) self.feat_matching_loss = FeatureMatchingLoss() self.disc_loss = DiscriminatorLoss() self.gen_loss = GeneratorLoss() self.kl_loss = KlLoss() - self.log_train_images = False - self.logged_real_samples = False - self._tb_logger = None - self.hann_window = None 
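In the constructor changes that follow, the hand-built SynthesizerTrn call is replaced by Hydra instantiation from the new synthesizer config section, so architecture hyperparameters live in YAML while runtime-only values are merged in at the call site. A minimal sketch of that pattern (class path and channel sizes taken from the configs above; the commented-out call is illustrative only):

    from hydra.utils import instantiate
    from omegaconf import OmegaConf

    cfg = OmegaConf.create({
        "_target_": "nemo.collections.tts.modules.vits_modules.SynthesizerTrn",
        "inter_channels": 192,
        "hidden_channels": 192,
        "filter_channels": 768,
    })
    # _target_ names the class, the remaining YAML keys become constructor
    # kwargs, and keyword arguments passed to instantiate() are merged on top:
    # net_g = instantiate(cfg, n_vocab=num_tokens,
    #                     spec_channels=n_fft // 2 + 1,
    #                     segment_size=segment_size // n_window_stride,
    #                     padding_idx=tokenizer_pad)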
- self.sample_rate = cfg.sample_rate - self.hop_size = cfg.n_window_stride - self.n_fft = cfg.train_ds.dataset.n_fft - self.win_length = cfg.train_ds.dataset.win_length - - # TODO: need to add SynthesizerTrn in config - self.net_g = SynthesizerTrn( + self.net_g = instantiate(cfg.synthesizer, n_vocab=num_tokens, - spec_channels=cfg.train_ds.dataset.n_fft // 2 + 1, - segment_size=cfg.segment_size // cfg.train_ds.dataset.hop_length, - inter_channels=cfg.inter_channels, - hidden_channels=cfg.hidden_channels, - filter_channels=cfg.filter_channels, - n_heads=cfg.n_heads, - n_layers=cfg.n_layers, - kernel_size=cfg.pitch_embedding_kernel_size, - p_dropout=cfg.p_dropout, - padding_idx=self.tokenizer_pad, - resblock=cfg.generator.resblock, - resblock_kernel_sizes=cfg.generator.resblock_kernel_sizes, - resblock_dilation_sizes=cfg.generator.resblock_dilation_sizes, - upsample_rates=cfg.generator.upsample_rates, - upsample_initial_channel=cfg.generator.upsample_initial_channel, - upsample_kernel_sizes=cfg.generator.upsample_kernel_sizes, - ) + spec_channels=cfg.n_fft // 2 + 1, + segment_size=cfg.segment_size // cfg.n_window_stride, + padding_idx=self.tokenizer_pad,) + self.net_d = MultiPeriodDiscriminator(cfg.use_spectral_norm) + self.automatic_optimization = False - window_fn = { - 'hann': torch.hann_window, - 'hamming': torch.hamming_window, - 'blackman': torch.blackman_window, - 'bartlett': torch.bartlett_window, - 'none': None, - }.get(self.hann_window, None) - - self.stft = lambda x: torch.stft( - input=x, - n_fft=self.n_fft, - hop_length=self.hop_size, - win_length=self.win_length, - window=window_fn(self.win_length, periodic=False).to(torch.float) if window_fn else None, - ) - def _setup_normalizer(self, cfg): if "text_normalizer" in cfg: normalizer_kwargs = {} @@ -160,89 +121,69 @@ def _setup_tokenizer(self, cfg): self.tokenizer = instantiate(cfg.text_tokenizer, **text_tokenizer_kwargs) - def parse(self, str_input: str) -> torch.tensor: - # TODO: Implement - pass + def parse(self, text: str, normalize=True) -> torch.tensor: + if self.training: + logging.warning("parse() is meant to be called in eval mode.") + if normalize and self.text_normalizer_call is not None: + text = self.text_normalizer_call(text, **self.text_normalizer_call_kwargs) + + eval_phon_mode = contextlib.nullcontext() + if hasattr(self.tokenizer, "set_phone_prob"): + eval_phon_mode = self.tokenizer.set_phone_prob(prob=1.0) + + with eval_phon_mode: + tokens = self.tokenizer.encode(text) + + return torch.tensor(tokens).long().unsqueeze(0).to(self.device) def configure_optimizers(self): - optim_g = torch.optim.AdamW(self.net_g.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps, weight_decay=0.01) - optim_d = torch.optim.AdamW(self.net_d.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps, weight_decay=0.01) - - max_steps=800000 - min_lr = 1e-5 - wu_ratio = 0.02 - wu_steps = 16000 - - # scheduler_g = CosineAnnealing(optimizer=optim_g, max_steps=max_steps, min_lr=min_lr, warmup_steps=wu_steps,) - # scheduler_d = CosineAnnealing(optimizer=optim_d, max_steps=max_steps, min_lr=min_lr)#, warmup_steps=1000,) + optim_config = self._cfg.optim.copy() + OmegaConf.set_struct(optim_config, False) + sched_config = optim_config.pop("sched", None) + OmegaConf.set_struct(optim_config, True) - scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=self._cfg.lr_decay) - scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=self._cfg.lr_decay) - - scheduler_g_dict = {'scheduler': 
scheduler_g, 'interval': 'step'} - scheduler_d_dict = {'scheduler': scheduler_d, 'interval': 'step'} + optim_g = instantiate(optim_config, params=self.net_g.parameters(),) + optim_d = instantiate(optim_config, params=self.net_d.parameters(),) - return [optim_g, optim_d], [scheduler_g_dict, scheduler_d_dict] - - # only for inference - def forward(self, batch, batch_idx, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): - with torch.no_grad(): - (y, y_lengths, x, x_lengths) = batch - # remove else - x = x[:1] - x_lengths = x_lengths[:1] - - y_hat, attn, mask, (z, z_p, m_p, logs_p) = self.net_g.infer(x, x_lengths, sid=sid, noise_scale=noise_scale, - length_scale=length_scale, noise_scale_w=noise_scale_w, max_len=1000) - y_hat_lengths = mask.sum([1, 2]).long() * self._cfg.n_window_stride - return y_hat, y_hat_lengths, (z, z_p, m_p, logs_p) - - def get_spec(self, audio): - with torch.cuda.amp.autocast(enabled=False): - spec = self.stft(audio) - if spec.dtype in [torch.cfloat, torch.cdouble]: - spec = torch.view_as_real(spec) - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-9) - return spec + if sched_config is not None: + scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=sched_config.lr_decay) + scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=sched_config.lr_decay) + + scheduler_g_dict = {'scheduler': scheduler_g, 'interval': 'step'} + scheduler_d_dict = {'scheduler': scheduler_d, 'interval': 'step'} + return [optim_g, optim_d], [scheduler_g_dict, scheduler_d_dict] + else: + return [optim_g, optim_d] - def training_step(self, batch, batch_idx): - # get optimizers - optim_g, optim_d = self.optimizers() + # for inference + def forward(self, tokens, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=1000): + x_lengths = tokens.size(-1) + y_hat = self.net_g.infer(tokens, x_lengths, sid=sid, noise_scale=noise_scale, + length_scale=length_scale, noise_scale_w=noise_scale_w, max_len=max_len)[0] - (y, y_lengths, x, x_lengths) = batch + return y_hat - spec = self.get_spec(y) - spec_lengths = self.audio_to_melspec_precessor.get_seq_len(y_lengths) + def training_step(self, batch, batch_idx): + speakers = None + if SpeakerID in self._train_dl.dataset.sup_data_types_set: + (y, y_lengths, x, x_lengths, speakers) = batch + else: + (y, y_lengths, x, x_lengths) = batch + spec, spec_lengths = self.audio_to_melspec_processor(y, y_lengths, linear_spec=True) + with autocast(enabled=True): y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = self.net_g( - x, x_lengths, spec, spec_lengths + x, x_lengths, spec, spec_lengths, speakers ) - mel = spec_to_mel_torch( - spec, - self._cfg.n_window_size, - self._cfg.n_mel_channels, - self._cfg.sample_rate, - self._cfg.mel_fmin, - self._cfg.mel_fmax, - ) - y_mel = slice_segments(mel, ids_slice, self._cfg.segment_size // self.cfg.n_window_stride) - + # y_mel = slice_segments(mel, ids_slice, self._cfg.segment_size // self.cfg.n_window_stride) y_hat = y_hat.float() - y_hat_mel = audio_to_mel_torch( - y_hat.squeeze(1), - self._cfg.n_window_size, - self._cfg.n_mel_channels, - self._cfg.sample_rate, - self.cfg.n_window_stride, - self._cfg.preprocessor.n_window_size, - self._cfg.mel_fmin, - self._cfg.mel_fmax, - ) - y = torch.unsqueeze(y, 1) - y = slice_segments(y, ids_slice * self.cfg.n_window_stride, self._cfg.segment_size) + y_hat_mel, _ = self.audio_to_melspec_processor(y_hat.squeeze(1), y_lengths, linear_spec=False) + + y = slice_segments(y.unsqueeze(1), ids_slice * 
self.cfg.n_window_stride, self._cfg.segment_size) + y_mel, _ = self.audio_to_melspec_processor(y.squeeze(1), y_lengths, linear_spec=False) with autocast(enabled=True): y_d_hat_r, y_d_hat_g, _, _ = self.net_d(y, y_hat.detach()) @@ -252,13 +193,15 @@ def training_step(self, batch, batch_idx): disc_generated_outputs=y_d_hat_g) loss_disc_all = loss_disc + # get optimizers + optim_g, optim_d = self.optimizers() # train discriminator optim_d.zero_grad() self.manual_backward(loss_disc_all) norm_d = clip_grad_value_(self.net_d.parameters(), None) optim_d.step() - + with autocast(enabled=True): y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = self.net_d(y, y_hat) # Generator @@ -307,47 +250,53 @@ def training_step(self, batch, batch_idx): self.log_dict(metrics, on_step=True, sync_dist=True) def validation_step(self, batch, batch_idx): - (y, y_lengths, x, x_lengths) = batch + speakers = None + # if SpeakerID in self._train_dl.dataset.sup_data_types_set: + if self.cfg.n_speakers > 1: + (y, y_lengths, x, x_lengths, speakers) = batch + else: + (y, y_lengths, x, x_lengths) = batch + + if speakers == None: + print(speakers) + y_hat, attn, mask, *_ = self.net_g.infer(x, x_lengths, speakers, max_len=1000) - y_hat, attn, mask, *_ = self.net_g.infer(x, x_lengths, max_len=1000) y_hat = y_hat.squeeze() - y_hat_lengths = mask.sum([1, 2]).long() * self._cfg.train_ds.dataset.hop_length + y_hat_lengths = mask.sum([1, 2]).long() * self._cfg.validation_ds.dataset.hop_length - mel, mel_lengths = self.audio_to_melspec_precessor(y, y_lengths) - y_hat_mel, y_hat_mel_lengths = self.audio_to_melspec_precessor(y_hat, y_hat_lengths) + mel, mel_lengths = self.audio_to_melspec_processor(y, y_lengths) + y_hat_mel, y_hat_mel_lengths = self.audio_to_melspec_processor(y_hat, y_hat_lengths) # plot audio once per epoch - if batch_idx == 0: + if batch_idx == 0 and isinstance(self.logger, WandbLogger) and HAVE_WANDB: logger = self.logger.experiment - # print(logger, self.logger) - if logger is not None and isinstance(self.logger, WandbLogger): - specs = [] - audios = [] - - specs += [ - wandb.Image( - plot_spectrogram_to_numpy(mel[0, :, : mel_lengths[0]].cpu().numpy()), caption=f"val_mel_target", - ), - wandb.Image( - plot_spectrogram_to_numpy(y_hat_mel[0, :, : y_hat_mel_lengths[0]].cpu().numpy()), - caption=f"val_mel_predicted", - ), - ] - - audios += [ - wandb.Audio( - y[0, : y_lengths[0]].data.cpu().to(torch.float).numpy(), - caption=f"val_wav_target", - sample_rate=self.sample_rate, - ), - wandb.Audio( - y_hat[0, : y_hat_lengths[0]].data.cpu().to(torch.float).numpy(), - caption=f"val_wav_predicted", - sample_rate=self.sample_rate, - ), - ] - - logger.log({"specs": specs, "audios": audios}) + specs = [] + audios = [] + + specs += [ + wandb.Image( + plot_spectrogram_to_numpy(mel[0, :, : mel_lengths[0]].data.cpu().numpy()), caption=f"val_mel_target", + ), + wandb.Image( + plot_spectrogram_to_numpy(y_hat_mel[0, :, : y_hat_mel_lengths[0]].data.cpu().numpy()), + caption=f"val_mel_predicted", + ), + ] + + audios += [ + wandb.Audio( + y[0, : y_lengths[0]].data.cpu().to(torch.float).numpy(), + caption=f"val_wav_target", + sample_rate=self._cfg.sample_rate, + ), + wandb.Audio( + y_hat[0, : y_hat_lengths[0]].data.cpu().to(torch.float).numpy(), + caption=f"val_wav_predicted", + sample_rate=self._cfg.sample_rate, + ), + ] + + logger.log({"specs": specs, "audios": audios}) def _loader(self, cfg): try: @@ -377,9 +326,7 @@ def train_dataloader(self): train_sampler = DistributedBucketSampler( dataset, - self.cfg.train_ds.batch_sampler.batch_size, - 
self.cfg.train_ds.batch_sampler.boundaries, - shuffle=self.cfg.train_ds.batch_sampler.shuffle) + **self.cfg.train_ds.batch_sampler) dataloader = torch.utils.data.DataLoader(dataset, collate_fn=dataset.collate_fn, batch_sampler=train_sampler, **self.cfg.train_ds.dataloader_params,) @@ -402,5 +349,4 @@ def list_available_models(cls) -> 'List[PretrainedModelInfo]': return list_of_models def convert_text_to_waveform(self, *, tokens): - # TODO: Convert text to waveforms - pass + return self(tokens).squeeze(1) diff --git a/nemo/collections/tts/models/vits_test.py b/nemo/collections/tts/models/vits_test.py deleted file mode 100644 index 048c50dc1358..000000000000 --- a/nemo/collections/tts/models/vits_test.py +++ /dev/null @@ -1,434 +0,0 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from nemo.core import typecheck - -# typecheck.set_typecheck_enabled(False) - -import omegaconf -import torch -import wandb -from hydra.utils import instantiate -from omegaconf import DictConfig -from pytorch_lightning import Trainer -from pytorch_lightning.loggers import WandbLogger -from torch.cuda.amp import autocast, GradScaler -from torch.nn import functional as F - -from nemo.collections.tts.helpers.helpers import plot_spectrogram_to_numpy, DistributedBucketSampler -from nemo.collections.tts.losses.vits_losses import ( - KlLoss, - FeatureMatchingLoss, - DiscriminatorLoss, - GeneratorLoss -) -from nemo.collections.tts.models.base import TextToWaveform -from nemo.collections.tts.modules.vits_modules import ( - MultiPeriodDiscriminator, - SynthesizerTrn, - audio_to_mel_torch, - clip_grad_value_, - slice_segments, - spec_to_mel_torch, -) -from nemo.core.classes.common import PretrainedModelInfo -from nemo.core.optim.lr_scheduler import CosineAnnealing -from nemo.utils import logging, model_utils - -class VitsModel(TextToWaveform): - def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): - # Convert to Hydra 1.0 compatible DictConfig - - cfg = model_utils.convert_model_config_to_dict_config(cfg) - cfg = model_utils.maybe_update_config_version(cfg) - - # setup normalizer - self.normalizer = None - self.text_normalizer_call = None - self.text_normalizer_call_kwargs = {} - self._setup_normalizer(cfg) - - # setup tokenizer - self.tokenizer = None - self._setup_tokenizer(cfg) - assert self.tokenizer is not None - - num_tokens = len(self.tokenizer.tokens) - self.tokenizer_pad = self.tokenizer.pad - self.tokenizer_unk = self.tokenizer.oov - - # self.scaler = GradScaler() - - super().__init__(cfg=cfg, trainer=trainer) - - self.audio_to_melspec_precessor = instantiate(cfg.preprocessor, highfreq=cfg.train_ds.dataset.highfreq) - - self.feat_matching_loss = FeatureMatchingLoss() - self.disc_loss = DiscriminatorLoss() - self.gen_loss = GeneratorLoss() - self.kl_loss = KlLoss() - - self.log_train_images = False - self.logged_real_samples = False - self._tb_logger = None - self.hann_window = None - self.sample_rate = cfg.sample_rate - self.hop_size = 
cfg.n_window_stride - self.n_fft = cfg.train_ds.dataset.n_fft - self.win_length = cfg.train_ds.dataset.win_length - - # TODO: need to add SynthesizerTrn in config - self.net_g = SynthesizerTrn( - n_vocab=num_tokens, - spec_channels=cfg.train_ds.dataset.n_fft // 2 + 1, - segment_size=cfg.segment_size // cfg.train_ds.dataset.hop_length, - inter_channels=cfg.inter_channels, - hidden_channels=cfg.hidden_channels, - filter_channels=cfg.filter_channels, - n_heads=cfg.n_heads, - n_layers=cfg.n_layers, - kernel_size=cfg.pitch_embedding_kernel_size, - p_dropout=cfg.p_dropout, - padding_idx=self.tokenizer_pad, - resblock=cfg.generator.resblock, - resblock_kernel_sizes=cfg.generator.resblock_kernel_sizes, - resblock_dilation_sizes=cfg.generator.resblock_dilation_sizes, - upsample_rates=cfg.generator.upsample_rates, - upsample_initial_channel=cfg.generator.upsample_initial_channel, - upsample_kernel_sizes=cfg.generator.upsample_kernel_sizes, - ) - self.net_d = MultiPeriodDiscriminator(cfg.use_spectral_norm) - self.automatic_optimization = True - - window_fn = { - 'hann': torch.hann_window, - 'hamming': torch.hamming_window, - 'blackman': torch.blackman_window, - 'bartlett': torch.bartlett_window, - 'none': None, - }.get(self.hann_window, None) - - self.stft = lambda x: torch.stft( - input=x, - n_fft=self.n_fft, - hop_length=self.hop_size, - win_length=self.win_length, - window=window_fn(self.win_length, periodic=False).to(torch.float) if window_fn else None, - ) - - def _setup_normalizer(self, cfg): - if "text_normalizer" in cfg: - normalizer_kwargs = {} - - if "whitelist" in cfg.text_normalizer: - normalizer_kwargs["whitelist"] = self.register_artifact( - 'text_normalizer.whitelist', cfg.text_normalizer.whitelist - ) - - self.normalizer = instantiate(cfg.text_normalizer, **normalizer_kwargs) - self.text_normalizer_call = self.normalizer.normalize - if "text_normalizer_call_kwargs" in cfg: - self.text_normalizer_call_kwargs = cfg.text_normalizer_call_kwargs - - def _setup_tokenizer(self, cfg): - text_tokenizer_kwargs = {} - if "g2p" in cfg.text_tokenizer and cfg.text_tokenizer.g2p is not None: - g2p_kwargs = {} - - if "phoneme_dict" in cfg.text_tokenizer.g2p: - g2p_kwargs["phoneme_dict"] = self.register_artifact( - 'text_tokenizer.g2p.phoneme_dict', cfg.text_tokenizer.g2p.phoneme_dict, - ) - - if "heteronyms" in cfg.text_tokenizer.g2p: - g2p_kwargs["heteronyms"] = self.register_artifact( - 'text_tokenizer.g2p.heteronyms', cfg.text_tokenizer.g2p.heteronyms, - ) - - text_tokenizer_kwargs["g2p"] = instantiate(cfg.text_tokenizer.g2p, **g2p_kwargs) - - self.tokenizer = instantiate(cfg.text_tokenizer, **text_tokenizer_kwargs) - - def parse(self, str_input: str) -> torch.tensor: - # TODO: Implement - pass - - def configure_optimizers(self): - optim_g = torch.optim.AdamW(self.net_g.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps, weight_decay=0.01) - optim_d = torch.optim.AdamW(self.net_d.parameters(), self._cfg.lr, betas=self._cfg.betas, eps=self._cfg.eps, weight_decay=0.01) - - max_steps=800000 - min_lr = 1e-5 - wu_ratio = 0.02 - wu_steps = 16000 - - scheduler_g = CosineAnnealing(optimizer=optim_g, max_steps=max_steps, min_lr=min_lr, warmup_steps=wu_steps,) - scheduler_d = CosineAnnealing(optimizer=optim_d, max_steps=max_steps, min_lr=min_lr)#, warmup_steps=1000,) - - # scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=self._cfg.lr_decay) - # scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=self._cfg.lr_decay) - scheduler_g_dict = {'scheduler': 
scheduler_g, 'interval': 'step'} - scheduler_d_dict = {'scheduler': scheduler_d, 'interval': 'step'} - - return [optim_d, optim_g], [scheduler_d_dict, scheduler_g_dict] - - # only for inference - def forward(self, batch, batch_idx, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): - with torch.no_grad(): - (y, y_lengths, x, x_lengths) = batch - # remove else - x = x[:1] - x_lengths = x_lengths[:1] - - y_hat, attn, mask, (z, z_p, m_p, logs_p) = self.net_g.infer(x, x_lengths, sid=sid, noise_scale=noise_scale, - length_scale=length_scale, noise_scale_w=noise_scale_w, max_len=1000) - y_hat_lengths = mask.sum([1, 2]).long() * self._cfg.n_window_stride - return y_hat, y_hat_lengths, (z, z_p, m_p, logs_p) - - def get_spec(self, audio): - with torch.cuda.amp.autocast(enabled=False): - spec = self.stft(audio) - if spec.dtype in [torch.cfloat, torch.cdouble]: - spec = torch.view_as_real(spec) - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-9) - return spec - - def training_step(self, batch, batch_idx, optimizer_idx): - # get optimizers - # optim_g, optim_d = self.optimizers() - - (y, y_lengths, x, x_lengths) = batch - - spec = self.get_spec(y) - spec_lengths = self.audio_to_melspec_precessor.get_seq_len(y_lengths) - - # train discriminator - if optimizer_idx == 0: - # with autocast(enabled=True): - with torch.no_grad(): - y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = self.net_g( - x, x_lengths, spec, spec_lengths - ) - - self.stash = y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - y = torch.unsqueeze(y, 1) - y = slice_segments(y, ids_slice * self.cfg.n_window_stride, self._cfg.segment_size) - # with autocast(enabled=True): - - y_d_hat_r, y_d_hat_g, _, _ = self.net_d(y, y_hat.detach()) - # with autocast(enabled=False): - # loss_disc_real, loss_disc_gen, losses_disc_r, losses_disc_g = self.disc_loss(disc_real_outputs=y_d_hat_r, - loss_disc, losses_disc_r, losses_disc_g = self.disc_loss(disc_real_outputs=y_d_hat_r, - disc_generated_outputs=y_d_hat_g) - # loss_disc_all = torch.max(loss_disc_real, loss_disc_gen) - loss_disc_all = loss_disc - # if self.global_step <= 180000: - # train discriminator - # optim_d.zero_grad() - # self.manual_backward(loss_disc_all) - norm_d = clip_grad_value_(self.net_d.parameters(), None) - # optim_d.step() - - metrics = { - "loss_disc_all": loss_disc_all, - "grad_disc": norm_d, - } - - for i, v in enumerate(losses_disc_r): - metrics[f"loss_disc_r_{i}"] = v - - for i, v in enumerate(losses_disc_g): - metrics[f"loss_disc_g_{i}"] = v - - self.log_dict(metrics, on_step=True, sync_dist=True) - print('disc', loss_disc_all) - return loss_disc_all - - # train generator - if optimizer_idx == 1: - # with autocast(enabled=True): - mel = spec_to_mel_torch( - spec, - self._cfg.n_window_size, - self._cfg.n_mel_channels, - self._cfg.sample_rate, - self._cfg.mel_fmin, - self._cfg.mel_fmax, - ) - - y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = self.net_g( - x, x_lengths, spec, spec_lengths - ) - - y = torch.unsqueeze(y, 1) - y = slice_segments(y, ids_slice * self.cfg.n_window_stride, self._cfg.segment_size) - y_mel = slice_segments(mel, ids_slice, self._cfg.segment_size // self.cfg.n_window_stride) - - y_hat = y_hat.float() - y_hat_mel = audio_to_mel_torch( - y_hat.squeeze(1), - self._cfg.n_window_size, - self._cfg.n_mel_channels, - self._cfg.sample_rate, - self.cfg.n_window_stride, - self._cfg.preprocessor.n_window_size, - self._cfg.mel_fmin, - 
self._cfg.mel_fmax, - ) - - # Generator - y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = self.net_d(y, y_hat) - # with autocast(enabled=False): - loss_dur = torch.sum(l_length.float()) - loss_mel = F.l1_loss(y_mel, y_hat_mel) * self._cfg.c_mel - loss_kl = self.kl_loss(z_p=z_p, logs_q=logs_q, m_p=m_p, logs_p=logs_p, z_mask=z_mask) * self._cfg.c_kl - loss_fm = self.feat_matching_loss(fmap_r=fmap_r, fmap_g=fmap_g) - loss_gen, losses_gen = self.gen_loss(disc_outputs=y_d_hat_g) - loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl - - # if loss_gen > loss_disc: - # loss_gen_all = loss_fm + loss_mel + loss_dur + loss_kl + loss_gen - # else: - # loss_gen_all = loss_fm + loss_mel + loss_dur + loss_kl - - # train generator - # optim_g.zero_grad() - # self.manual_backward(loss_gen_all) - norm_g = clip_grad_value_(self.net_g.parameters(), None) - # optim_g.step() - - metrics = { - "loss_gen": loss_gen, - "loss_fm": loss_fm, - "loss_mel * c_mel": loss_mel, - "loss_dur": loss_dur, - "loss_kl * c_kl": loss_kl, - "loss_gen_all": loss_gen_all, - "grad_gen": norm_g, - } - - for i, v in enumerate(losses_gen): - metrics[f"loss_gen_i_{i}"] = v - - self.log_dict(metrics, on_step=True, sync_dist=True) - print('gen', loss_gen_all) - return loss_gen_all - - # schedulers = self.lr_schedulers() - # if schedulers is not None:# and self.trainer.is_last_batch: - # sch1, sch2 = schedulers - # sch1.step() - # sch2.step() - - - - def validation_step(self, batch, batch_idx): - (y, y_lengths, x, x_lengths) = batch - - # TODO: fix hardcode - y_hat, attn, mask, *_ = self.net_g.infer(x, x_lengths, max_len=1000) - y_hat = y_hat.squeeze() - y_hat_lengths = mask.sum([1, 2]).long() * self._cfg.train_ds.dataset.hop_length - - mel, mel_lengths = self.audio_to_melspec_precessor(y, y_lengths) - y_hat_mel, y_hat_mel_lengths = self.audio_to_melspec_precessor(y_hat, y_hat_lengths) - - # plot audio once per epoch - if batch_idx == 0: - logger = self.logger.experiment - # print(logger, self.logger) - if logger is not None and isinstance(self.logger, WandbLogger): - specs = [] - audios = [] - - specs += [ - wandb.Image( - plot_spectrogram_to_numpy(mel[0, :, : mel_lengths[0]].cpu().numpy()), caption=f"val_mel_target", - ), - wandb.Image( - plot_spectrogram_to_numpy(y_hat_mel[0, :, : y_hat_mel_lengths[0]].cpu().numpy()), - caption=f"val_mel_predicted", - ), - ] - - audios += [ - wandb.Audio( - y[0, : y_lengths[0]].data.cpu().to(torch.float).numpy(), - caption=f"val_wav_target", - sample_rate=self.sample_rate, - ), - wandb.Audio( - y_hat[0, : y_hat_lengths[0]].data.cpu().to(torch.float).numpy(), - caption=f"val_wav_predicted", - sample_rate=self.sample_rate, - ), - ] - - logger.log({"specs": specs, "audios": audios}) - - def _loader(self, cfg): - try: - # _ = cfg.model.train_ds.manifest_filepath - _ = cfg['dataset']['manifest_filepath'] - except omegaconf.errors.MissingMandatoryValue: - logging.warning("manifest_filepath was skipped. 
No dataset for this model.") - return None - - dataset = instantiate( - cfg.dataset, - text_normalizer=self.normalizer, - text_normalizer_call_kwargs=self.text_normalizer_call_kwargs, - text_tokenizer=self.tokenizer, - ) - return torch.utils.data.DataLoader( # noqa - dataset=dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params, - ) - - def train_dataloader(self): - # default used by the Trainer - dataset = instantiate( - self.cfg.train_ds.dataset, - text_normalizer=self.normalizer, - text_normalizer_call_kwargs=self.text_normalizer_call_kwargs, - text_tokenizer=self.tokenizer, - ) - - train_sampler = DistributedBucketSampler( - dataset, - self.cfg.train_ds.batch_sampler.batch_size, - [32,300,400,500,600,700,800,900,1000], - shuffle=True) - dataloader = torch.utils.data.DataLoader(dataset, collate_fn=dataset.collate_fn, batch_sampler=train_sampler, - **self.cfg.train_ds.dataloader_params,) - return dataloader - - def setup_training_data(self, cfg): - self._train_dl = self._loader(cfg) - - def setup_validation_data(self, cfg): - self._validation_dl = self._loader(cfg) - - def setup_test_data(self, cfg): - """Omitted.""" - pass - - @classmethod - def list_available_models(cls) -> 'List[PretrainedModelInfo]': - list_of_models = [] - # TODO: List available models?? - return list_of_models - - def convert_text_to_waveform(self, *, tokens): - # TODO: Convert text to waveforms - pass diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index 9bba7200dd8a..8ba11c512e70 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -44,22 +44,22 @@ from librosa.filters import mel as librosa_mel_fn from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm +from nemo.collections.tts.modules.hifigan_modules import ResBlock1, ResBlock2, init_weights, get_padding +from nemo.collections.tts.helpers.splines import piecewise_rational_quadratic_transform from nemo.collections.tts.modules.monotonic_align import maximum_path - -# TODO: need to do LARGE refactoring - +from nemo.collections.tts.helpers.helpers import convert_pad_shape, generate_path, get_mask_from_lengths, rand_slice_segments LRELU_SLOPE = 0.1 -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - +@torch.jit.script +def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): + n_channels_int = n_channels[0] + in_act = input_a + input_b + t_act = torch.tanh(in_act[:, :n_channels_int, :]) + s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) + acts = t_act * s_act + return acts class LayerNorm(nn.Module): def __init__(self, channels, eps=1e-5): @@ -228,78 +228,6 @@ def remove_weight_norm(self): for l in self.res_skip_layers: torch.nn.utils.remove_weight_norm(l) -# TODO: reuse from hifigan if it is possible? 
-class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - -# TODO: reuse from hifigan if it is possible? -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - class Log(nn.Module): def forward(self, x, x_mask, reverse=False, **kwargs): @@ -418,16 +346,18 @@ def forward(self, x, x_mask, g=None, reverse=False): unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) unnormalized_heights = h[..., self.num_bins:2 * self.num_bins] / math.sqrt(self.filter_channels) unnormalized_derivatives = h[..., 2 * self.num_bins:] - - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) - + + if x1.size(0) != 0: + x1, logabsdet = piecewise_rational_quadratic_transform(x1, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=reverse, + tails='linear', + tail_bound=self.tail_bound + ) + else: + logdet = 0 x = torch.cat([x0, x1], 1) * x_mask logdet = torch.sum(logabsdet * x_mask, [1, 2]) if not reverse: @@ -592,7 +522,7 @@ def __init__(self, def forward(self, x, x_lengths): x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) + x_mask = torch.unsqueeze(get_mask_from_lengths(x_lengths, 
x.size(2)), 1).to(x.dtype) x = self.encoder(x * x_mask, x_mask) stats = self.proj(x) * x_mask @@ -657,7 +587,7 @@ def __init__(self, self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype).to(device=x.device) + x_mask = torch.unsqueeze(get_mask_from_lengths(x_lengths, x.size(2)), 1).to(x.dtype).to(device=x.device) x = self.pre(x) * x_mask x = self.enc(x, x_mask, g=g) stats = self.proj(x) * x_mask @@ -667,7 +597,6 @@ def forward(self, x, x_lengths, g=None): z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask return z, m, logs, x_mask -# TODO: reuse from hifigan if it is possible? class Generator(torch.nn.Module): def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): super(Generator, self).__init__() @@ -702,12 +631,9 @@ def forward(self, x, g=None): for i in range(self.num_upsamples): x = F.leaky_relu(x, LRELU_SLOPE) x = self.ups[i](x) - xs = None + xs = torch.zeros(x.shape, dtype=x.dtype, device=x.device) for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i*self.num_kernels+j](x) - else: - xs += self.resblocks[i*self.num_kernels+j](x) + xs += self.resblocks[i*self.num_kernels+j](x) x = xs / self.num_kernels x = F.leaky_relu(x) x = self.conv_post(x) @@ -722,7 +648,6 @@ def remove_weight_norm(self): for l in self.resblocks: l.remove_weight_norm() -# TODO: reuse from hifigan if it is possible? class DiscriminatorP(torch.nn.Module): def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): super(DiscriminatorP, self).__init__() @@ -790,7 +715,6 @@ def forward(self, x): return x, fmap -# TODO: reuse from hifigan if it is possible? class MultiPeriodDiscriminator(torch.nn.Module): def __init__(self, use_spectral_norm=False): super(MultiPeriodDiscriminator, self).__init__() @@ -889,9 +813,8 @@ def __init__(self, self.emb_g = nn.Embedding(n_speakers, gin_channels) def forward(self, x, x_lengths, y, y_lengths, sid=None): - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: + if self.n_speakers > 1: g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] else: g = None @@ -930,7 +853,7 @@ def forward(self, x, x_lengths, y, y_lengths, sid=None): def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: + if self.n_speakers > 1: g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] else: g = None @@ -942,7 +865,7 @@ def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_sca w = torch.exp(logw) * x_mask * length_scale w_ceil = torch.ceil(w) y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(sequence_mask(y_lengths, None), 1).to(x_mask.dtype) + y_mask = torch.unsqueeze(get_mask_from_lengths(y_lengths, None), 1).to(x_mask.dtype) attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) attn = generate_path(w_ceil, attn_mask) @@ -954,9 +877,9 @@ def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_sca o = self.dec((z * y_mask)[:,:,:max_len], g=g) return o, attn, y_mask, (z, z_p, m_p, logs_p) - # TODO: do we really need it? 
Can be used for emotions conversion + # Can be used for emotions conversion def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): - assert self.n_speakers > 0, "n_speakers have to be larger than 0." + assert self.n_speakers > 1, "n_speakers have to be larger than 1." g_src = self.emb_g(sid_src).unsqueeze(-1) g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) @@ -965,209 +888,6 @@ def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): o_hat = self.dec(z_hat * y_mask, g=g_tgt) return o_hat, y_mask, (z, z_p, z_hat) -################## -# Mel_processing # -################## - -mel_basis = {} -hann_window = {} - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global mel_basis - dtype_device = str(spec.dtype) + '_' + str(spec.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def audio_to_mel_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - - return spec - - -########### -# Commons # -########### - -def init_weights(m, mean=0.0, std=0.01): - classname = m.__class__.__name__ - if classname.find("Conv") != -1: - m.weight.data.normal_(mean, std) - - -def get_padding(kernel_size, dilation=1): - return int((kernel_size*dilation - dilation)/2) - - -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - -def kl_divergence(m_p, logs_p, m_q, logs_q): - """KL(P||Q)""" - kl = (logs_q - logs_p) - 0.5 - kl += 0.5 * (torch.exp(2. * logs_p) + ((m_p - m_q)**2)) * torch.exp(-2. 
* logs_q) - return kl - - -def rand_gumbel(shape): - """Sample from the Gumbel distribution, protect from overflows.""" - uniform_samples = torch.rand(shape) * 0.99998 + 0.00001 - return -torch.log(-torch.log(uniform_samples)) - - -def rand_gumbel_like(x): - g = rand_gumbel(x.size()).to(dtype=x.dtype, device=x.device) - return g - - -def slice_segments(x, ids_str, segment_size=4): - ret = torch.zeros_like(x[:, :, :segment_size]) - for i in range(x.size(0)): - idx_str = ids_str[i] - idx_end = idx_str + segment_size - ret[i] = x[i, :, idx_str:idx_end] - return ret - - -def rand_slice_segments(x, x_lengths=None, segment_size=4): - b, d, t = x.size() - if x_lengths is None: - x_lengths = t - ids_str_max = x_lengths - segment_size + 1 - ids_str_max = ids_str_max.to(device=x.device) - # torch.manual_seed(1) - # torch.cuda.manual_seed(1) - ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - ret = slice_segments(x, ids_str, segment_size) - return ret, ids_str - - -def get_timing_signal_1d( - length, channels, min_timescale=1.0, max_timescale=1.0e4): - position = torch.arange(length, dtype=torch.float) - num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (num_timescales - 1)) - inv_timescales = min_timescale * torch.exp( - torch.arange(num_timescales, dtype=torch.float) * -log_timescale_increment) - scaled_time = position.unsqueeze(0) * inv_timescales.unsqueeze(1) - signal = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], 0) - signal = F.pad(signal, [0, 0, 0, channels % 2]) - signal = signal.view(1, channels, length) - return signal - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return x + signal.to(dtype=x.dtype, device=x.device) - - -def cat_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4, axis=1): - b, channels, length = x.size() - signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale) - return torch.cat([x, signal.to(dtype=x.dtype, device=x.device)], axis) - - -def subsequent_mask(length): - mask = torch.tril(torch.ones(length, length)).unsqueeze(0).unsqueeze(0) - return mask - - -@torch.jit.script -def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): - n_channels_int = n_channels[0] - in_act = input_a + input_b - t_act = torch.tanh(in_act[:, :n_channels_int, :]) - s_act = torch.sigmoid(in_act[:, n_channels_int:, :]) - acts = t_act * s_act - return acts - - -def convert_pad_shape(pad_shape): - l = pad_shape[::-1] - pad_shape = [item for sublist in l for item in sublist] - return pad_shape - - -def shift_1d(x): - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1] - return x - -# TODO: reuse from helpers get_mask_from_lengths? 
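For reference: both the `sequence_mask` helper removed just below and the `get_mask_from_lengths` helper that replaces it throughout this patch build the same boolean padding mask. A minimal self-contained sketch of that shared behavior (the example lengths are made up):

    import torch

    def length_mask(lengths, max_length=None):
        # lengths: integer tensor of shape [batch]; returns a bool mask of
        # shape [batch, max_length] where entry (i, t) is True iff t < lengths[i].
        if max_length is None:
            max_length = int(lengths.max())
        positions = torch.arange(max_length, device=lengths.device)
        return positions.unsqueeze(0) < lengths.unsqueeze(1)

    print(length_mask(torch.tensor([2, 4])))
    # tensor([[ True,  True, False, False],
    #         [ True,  True,  True,  True]])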
-def sequence_mask(length, max_length=None): - if max_length is None: - max_length = length.max() - x = torch.arange(max_length, dtype=length.dtype, device=length.device) - return x.unsqueeze(0) < length.unsqueeze(1) - - -def generate_path(duration, mask): - """ - duration: [b, 1, t_x] - mask: [b, 1, t_y, t_x] - """ - b, _, t_y, t_x = mask.shape - cum_duration = torch.cumsum(duration, -1) - - cum_duration_flat = cum_duration.view(b * t_x) - path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) - path = path.view(b, t_x, t_y) - path = path - F.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path - - -def clip_grad_value_(parameters, clip_value, norm_type=2): - if isinstance(parameters, torch.Tensor): - parameters = [parameters] - parameters = list(filter(lambda p: p.grad is not None, parameters)) - norm_type = float(norm_type) - if clip_value is not None: - clip_value = float(clip_value) - - total_norm = 0 - for p in parameters: - param_norm = p.grad.data.norm(norm_type) - total_norm += param_norm.item() ** norm_type - if clip_value is not None: - p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1. / norm_type) - return total_norm - ############## # Attentions # @@ -1461,197 +1181,4 @@ def _same_padding(self, x): pad_r = self.kernel_size // 2 padding = [[0, 0], [0, 0], [pad_l, pad_r]] x = F.pad(x, convert_pad_shape(padding)) - return x - - -############## -# Transforms # -############## - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are 
not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * 
(input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet + return x \ No newline at end of file diff --git a/nemo/collections/tts/torch/data.py b/nemo/collections/tts/torch/data.py index 54ba073ff934..f626388f86df 100644 --- a/nemo/collections/tts/torch/data.py +++ b/nemo/collections/tts/torch/data.py @@ -22,7 +22,6 @@ from typing import Callable, Dict, List, Optional, Union import librosa -from nemo.collections.tts.modules.vits_modules import intersperse import numpy as np import torch from tqdm import tqdm @@ -33,7 +32,7 @@ BaseTokenizer, EnglishCharsTokenizer, EnglishPhonemesTokenizer, - IPAPhonemesTokenizer, + IPATokenizer, ) from nemo.collections.tts.torch.helpers import ( BetaBinomialInterpolator, @@ -106,7 +105,6 @@ def __init__( n_mels: int = 80, lowfreq: int = 0, highfreq: Optional[int] = None, - add_blank=True, **kwargs, ): """Dataset which can be used for training spectrogram generators and end-to-end TTS models. 
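The `add_blank` option removed from the dataset in this diff relied on the `intersperse` helper (deleted from the commons section above), which places a blank id before, between, and after all tokens. A minimal sketch of its behavior, using blank id 0 as in the removed calls:

    def intersperse(lst, item):
        # Surround every element of `lst` with `item`.
        result = [item] * (len(lst) * 2 + 1)
        result[1::2] = lst
        return result

    print(intersperse([5, 9, 7], 0))  # [0, 5, 0, 9, 0, 7, 0]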
@@ -177,7 +175,7 @@ def __init__( self.text_tokenizer = text_tokenizer self.phoneme_probability = None - if isinstance(self.text_tokenizer, IPAPhonemesTokenizer): + if isinstance(self.text_tokenizer, IPATokenizer): self.text_tokenizer_pad_id = text_tokenizer.pad self.tokens = text_tokenizer.tokens self.phoneme_probability = getattr(self.text_tokenizer, "phoneme_probability", None) @@ -265,10 +263,6 @@ def __init__( self.data = TTSDataset.filter_files(data, ignore_file, min_duration, max_duration, total_duration) self.base_data_dir = get_base_dir([item["audio_filepath"] for item in self.data]) - random.seed(1234) - random.shuffle(self.data) - - self.add_blank = add_blank # Initialize audio and mel related parameters self.sample_rate = sample_rate self.featurizer = WaveformFeaturizer(sample_rate=self.sample_rate) @@ -502,14 +496,10 @@ def __getitem__(self, index): if "text_tokens" in sample: text = sample["text_tokens"] - if self.add_blank: - text = intersperse(text, 0) text = torch.tensor(text).long() text_length = torch.tensor(len(text)).long() else: tokenized = self.text_tokenizer(sample["normalized_text"]) - if self.add_blank: - tokenized = intersperse(tokenized, 0) text = torch.tensor(tokenized).long() text_length = torch.tensor(len(tokenized)).long() @@ -1038,307 +1028,110 @@ def __getitem__(self, index): def __len__(self): return len(self.data) -class VitsDataset(Dataset): - def __init__( - self, - manifest_filepath: Union[str, Path, List[str], List[Path]], - sample_rate: int, - text_tokenizer: Union[BaseTokenizer, Callable[[str], List[int]]], - tokens: Optional[List[str]] = None, - text_normalizer: Optional[Union[Normalizer, Callable[[str], str]]] = None, - text_normalizer_call_kwargs: Optional[Dict] = None, - text_tokenizer_pad_id: Optional[int] = None, - sup_data_types: Optional[List[str]] = None, - sup_data_path: Optional[Union[Path, str]] = None, - max_duration: Optional[float] = None, - min_duration: Optional[float] = 0.1, - ignore_file: Optional[Union[str, Path]] = None, - trim: bool = False, - n_fft: int = 1024, - win_length: Optional[int] = 1024, - hop_length: Optional[int] = 256, - n_mels: int = 80, - add_blank=True, - **kwargs, - ): - """Dataset which can be used for training spectrogram generators and end-to-end TTS models. - It loads main data types (audio, text) and specified supplementary data types (log mel, durations, align prior matrix, pitch, energy, speaker id). - Some of supplementary data types will be computed on the fly and saved in the sup_data_path if they did not exist before. - Saved folder can be changed for some supplementary data types (see keyword args section). - Arguments for supplementary data should be also specified in this class and they will be used from kwargs (see keyword args section). - Args: - manifest_filepath (Union[str, Path, List[str], List[Path]]): Path(s) to the .json manifests containing information on the - dataset. Each line in the .json file should be valid json. Note: the .json file itself is not valid - json. Each line should contain the following: - "audio_filepath": , - "text": , - "normalized_text": (Optional), - "mel_filepath": (Optional), - "duration": (Optional) - sample_rate (int): The sample rate of the audio. Or the sample rate that we will resample all files to. - text_tokenizer (Optional[Union[BaseTokenizer, Callable[[str], List[int]]]]): BaseTokenizer or callable which represents text tokenizer. - tokens (Optional[List[str]]): Tokens from text_tokenizer. Should be specified if text_tokenizer is not BaseTokenizer. 
- text_normalizer (Optional[Union[Normalizer, Callable[[str], str]]]): Normalizer or callable which represents text normalizer. - text_normalizer_call_kwargs (Optional[Dict]): Additional arguments for text_normalizer function. - text_tokenizer_pad_id (Optional[int]): Index of padding. Should be specified if text_tokenizer is not BaseTokenizer. - sup_data_types (Optional[List[str]]): List of supplementary data types. - sup_data_path (Optional[Union[Path, str]]): A folder that contains or will contain supplementary data (e.g. pitch). - max_duration (Optional[float]): Max duration of audio clips in seconds. All samples exceeding this will be - pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load - audio to compute duration. Defaults to None which does not prune. - min_duration (Optional[float]): Min duration of audio clips in seconds. All samples lower than this will be - pruned prior to training. Note: Requires "duration" to be set in the manifest file. It does not load - audio to compute duration. Defaults to None which does not prune. - ignore_file (Optional[Union[str, Path]]): The location of a pickle-saved list of audio paths - that will be pruned prior to training. Defaults to None which does not prune. - trim (Optional[bool]): Whether to apply librosa.effects.trim to the audio file. Defaults to False. - n_fft (int): The number of fft samples. Defaults to 1024 - win_length (Optional[int]): The length of the stft windows. Defaults to None which uses n_fft. - hop_length (Optional[int]): The hope length between fft computations. Defaults to None which uses n_fft//4. - window (str): One of 'hann', 'hamming', 'blackman','bartlett', 'none'. Which corresponds to the - equivalent torch window function. - n_mels (int): The number of mel filters. Defaults to 80. - lowfreq (int): The lowfreq input to the mel filter calculation. Defaults to 0. - highfreq (Optional[int]): The highfreq input to the mel filter calculation. Defaults to None. - Keyword Args: - log_mel_folder (Optional[Union[Path, str]]): The folder that contains or will contain log mel spectrograms. - align_prior_matrix_folder (Optional[Union[Path, str]]): The folder that contains or will contain align prior matrices. - pitch_folder (Optional[Union[Path, str]]): The folder that contains or will contain pitch. - energy_folder (Optional[Union[Path, str]]): The folder that contains or will contain energy. - durs_file (Optional[str]): String path to pickled durations location. - durs_type (Optional[str]): Type of durations. Currently supported only "aligner-based". - use_beta_binomial_interpolator (Optional[bool]): Whether to use beta-binomial interpolator for calculating alignment prior matrix. Defaults to False. - pitch_fmin (Optional[float]): The fmin input to librosa.pyin. Defaults to librosa.note_to_hz('C2'). - pitch_fmax (Optional[float]): The fmax input to librosa.pyin. Defaults to librosa.note_to_hz('C7'). - pitch_mean (Optional[float]): The mean that we use to normalize the pitch. - pitch_std (Optional[float]): The std that we use to normalize the pitch. - pitch_norm (Optional[bool]): Whether to normalize pitch (via pitch_mean and pitch_std) or not. 
- """ - super().__init__() - - # Initialize text tokenizer - self.text_tokenizer = text_tokenizer - - self.phoneme_probability = None - if isinstance(self.text_tokenizer, IPAPhonemesTokenizer): - self.text_tokenizer_pad_id = text_tokenizer.pad - self.tokens = text_tokenizer.tokens - self.phoneme_probability = getattr(self.text_tokenizer, "phoneme_probability", None) - else: - if text_tokenizer_pad_id is None: - raise ValueError(f"text_tokenizer_pad_id must be specified if text_tokenizer is not BaseTokenizer") - - if tokens is None: - raise ValueError(f"tokens must be specified if text_tokenizer is not BaseTokenizer") - - self.text_tokenizer_pad_id = text_tokenizer_pad_id - self.tokens = tokens - self.cache_text = True if self.phoneme_probability is None else False - - # Initialize text normalizer is specified - self.text_normalizer = text_normalizer - self.text_normalizer_call = ( - self.text_normalizer.normalize if isinstance(self.text_normalizer, Normalizer) else self.text_normalizer - ) - self.text_normalizer_call_kwargs = ( - text_normalizer_call_kwargs if text_normalizer_call_kwargs is not None else {} - ) - - # Initialize and read manifest file(s), filter out data by duration and ignore_file, compute base dir - if isinstance(manifest_filepath, str): - manifest_filepath = [manifest_filepath] - self.manifest_filepath = manifest_filepath - self.lengths = [] - - data = [] - total_duration = 0 - for manifest_file in self.manifest_filepath: - with open(Path(manifest_file).expanduser(), 'r') as f: - logging.info(f"Loading dataset from {manifest_file}.") - for line in tqdm(f): - item = json.loads(line) - - file_info = { - "audio_filepath": "../" + item["audio_filepath"], - "original_text": item["text"], - "mel_filepath": item["mel_filepath"] if "mel_filepath" in item else None, - "duration": item["duration"] if "duration" in item else None, - } - - if "normalized_text" not in item: - text = item["text"] - if self.text_normalizer is not None: - text = self.text_normalizer_call(text, **self.text_normalizer_call_kwargs) - file_info["normalized_text"] = text - else: - file_info["normalized_text"] = item["normalized_text"] - - if self.cache_text: - file_info["text_tokens"] = self.text_tokenizer(file_info["normalized_text"]) - - if self.cache_text: - file_info["text_tokens"] = self.text_tokenizer(file_info["normalized_text"]) - - data.append(file_info) - self.lengths.append(os.path.getsize(item["audio_filepath"]) // (2 * hop_length)) - if file_info["duration"] is None: - logging.info( - "Not all audio files have duration information. Duration logging will be disabled." 
- ) - total_duration = None - - if total_duration is not None: - total_duration += item["duration"] - - logging.info(f"Loaded dataset with {len(data)} files.") - if total_duration is not None: - logging.info(f"Dataset contains {total_duration / 3600:.2f} hours.") - - self.data = VitsDataset.filter_files(data, ignore_file, min_duration, max_duration, total_duration) - self.base_data_dir = get_base_dir([item["audio_filepath"] for item in self.data]) - - random.seed(1234) - random.shuffle(self.data) - - self.add_blank = add_blank - # Initialize audio and mel related parameters - self.sample_rate = sample_rate - self.featurizer = WaveformFeaturizer(sample_rate=self.sample_rate) - self.trim = trim - - self.n_fft = n_fft - self.n_mels = n_mels - - - - @staticmethod - def filter_files(data, ignore_file, min_duration, max_duration, total_duration): - if ignore_file: - logging.info(f"Using {ignore_file} to prune dataset.") - with open(Path(ignore_file).expanduser(), "rb") as f: - wavs_to_ignore = set(pickle.load(f)) - - filtered_data: List[Dict] = [] - pruned_duration = 0 if total_duration is not None else None - pruned_items = 0 - for item in data: - audio_path = item['audio_filepath'] - - # Prune data according to min/max_duration & the ignore file - if total_duration is not None: - if (min_duration and item["duration"] < min_duration) or ( - max_duration and item["duration"] > max_duration - ): - pruned_duration += item["duration"] - pruned_items += 1 - continue - - if ignore_file and (audio_path in wavs_to_ignore): - pruned_items += 1 - pruned_duration += item["duration"] - wavs_to_ignore.remove(audio_path) - continue - - filtered_data.append(item) - - logging.info(f"Pruned {pruned_items} files. Final dataset contains {len(filtered_data)} files") - if pruned_duration is not None: - logging.info( - f"Pruned {pruned_duration / 3600:.2f} hours. Final dataset contains " - f"{(total_duration - pruned_duration) / 3600:.2f} hours." - ) - - return filtered_data - def get_spec(self, audio): - with torch.cuda.amp.autocast(enabled=False): - spec = torch.stft(audio, - n_fft=self.n_fft, - hop_length=self.hop_len, - win_length=self.win_length, - window=torch.hann_window(self.win_length, periodic=False).to(torch.float), - return_complex=True) - - if spec.dtype in [torch.cfloat, torch.cdouble]: - spec = torch.view_as_real(spec) - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-9) - return spec - - def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - - def __getitem__(self, index): - sample = self.data[index] - - # Load audio - features = self.featurizer.process(sample["audio_filepath"], trim=self.trim) - audio, audio_length = features, torch.tensor(features.shape[0]).long() - - tokenized = self.text_tokenizer(sample["normalized_text"]) - tokenized = intersperse(tokenized, 0) - text = torch.tensor(tokenized).long() - text_length = torch.tensor(len(tokenized)).long() - - return ( - audio, - audio_length, - text, - text_length, - ) +class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler): + """ + Maintain similar input lengths in a batch. + Length groups are specified by boundaries. + Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}. + + It removes samples which are not included in the boundaries. + Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded. 
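To make the boundary rule above concrete, here is a minimal self-contained sketch of the bucket assignment that the sampler's `_bisect` performs (the boundary and length values are hypothetical):

    import bisect

    def assign_bucket(length, boundaries):
        # Returns i such that boundaries[i] < length <= boundaries[i + 1],
        # or -1 if the length falls outside the boundaries and is discarded.
        idx = bisect.bisect_left(boundaries, length)
        if 0 < idx < len(boundaries):
            return idx - 1
        return -1

    boundaries = [32, 300, 500]
    print(assign_bucket(180, boundaries))  # 0, since 32 < 180 <= 300
    print(assign_bucket(450, boundaries))  # 1, since 300 < 450 <= 500
    print(assign_bucket(20, boundaries))   # -1, shorter than b1 -> discarded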
+ """ + def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True): + super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle) + self.lengths = dataset.lengths + self.batch_size = batch_size + self.boundaries = boundaries + + self.buckets, self.num_samples_per_bucket = self._create_buckets() + self.total_size = sum(self.num_samples_per_bucket) + self.num_samples = self.total_size // self.num_replicas + + def _create_buckets(self): + buckets = [[] for _ in range(len(self.boundaries) - 1)] + for i in range(len(self.lengths)): + length = self.lengths[i] + idx_bucket = self._bisect(length) + if idx_bucket != -1: + buckets[idx_bucket].append(i) + + for i in range(len(buckets) - 1, 0, -1): + if len(buckets[i]) == 0: + buckets.pop(i) + self.boundaries.pop(i+1) + + num_samples_per_bucket = [] + for i in range(len(buckets)): + len_bucket = len(buckets[i]) + total_batch_size = self.num_replicas * self.batch_size + rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size + num_samples_per_bucket.append(len_bucket + rem) + return buckets, num_samples_per_bucket + + def __iter__(self): + # deterministically shuffle based on epoch + g = torch.Generator() + g.manual_seed(self.epoch) + indices = [] + if self.shuffle: + for bucket in self.buckets: + indices.append(torch.randperm(len(bucket), generator=g).tolist()) + else: + for bucket in self.buckets: + indices.append(list(range(len(bucket)))) + + batches = [] + for i in range(len(self.buckets)): + bucket = self.buckets[i] + len_bucket = len(bucket) + ids_bucket = indices[i] + num_samples_bucket = self.num_samples_per_bucket[i] + + # add extra samples to make it evenly divisible + rem = num_samples_bucket - len_bucket + ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)] + + # subsample + ids_bucket = ids_bucket[self.rank::self.num_replicas] + + # batching + for j in range(len(ids_bucket) // self.batch_size): + batch = [bucket[idx] for idx in ids_bucket[j*self.batch_size:(j+1)*self.batch_size]] + batches.append(batch) + + if self.shuffle: + batch_ids = torch.randperm(len(batches), generator=g).tolist() + batches = [batches[i] for i in batch_ids] + self.batches = batches + + assert len(self.batches) * self.batch_size == self.num_samples + return iter(self.batches) + + def _bisect(self, x, lo=0, hi=None): + if hi is None: + hi = len(self.boundaries) - 1 + + if hi > lo: + mid = (hi + lo) // 2 + if self.boundaries[mid] < x and x <= self.boundaries[mid+1]: + return mid + elif x <= self.boundaries[mid]: + return self._bisect(x, lo, mid) + else: + return self._bisect(x, mid + 1, hi) + else: + return -1 def __len__(self): - return len(self.data) - - def join_data(self, data_dict): - result = [] - for data_type in MAIN_DATA_TYPES: - result.append(data_dict[data_type.name]) - - if issubclass(data_type, TTSDataType) and issubclass(data_type, WithLens): - result.append(data_dict[f"{data_type.name}_lens"]) + return self.num_samples // self.batch_size - return tuple(result) - - def general_collate_fn(self, batch): - ( - _, - audio_lengths, - _, - tokens_lengths, - ) = zip(*batch) - - max_audio_len = max(audio_lengths).item() - max_tokens_len = max(tokens_lengths).item() - - audios, tokens = [], [] - - for i, sample_tuple in enumerate(batch): - ( - audio, - audio_len, - token, - token_len, - ) = sample_tuple - - audio = general_padding(audio, audio_len.item(), max_audio_len) - audios.append(audio) - - token = general_padding(token, token_len.item(), 
max_tokens_len, pad_value=self.text_tokenizer_pad_id) - tokens.append(token) - - - data_dict = { - "audio": torch.stack(audios), - "audio_lens": torch.stack(audio_lengths), - "text": torch.stack(tokens), - "text_lens": torch.stack(tokens_lengths), - } - - return data_dict - - def _collate_fn(self, batch): - data_dict = self.general_collate_fn(batch) - joined_data = self.join_data(data_dict) - return joined_data + def set_epoch(self, epoch: int) -> None: + """ + Sets the epoch for this sampler. When :attr:`shuffle=True`, this ensures all replicas + use a different random ordering for each epoch. Otherwise, the next iteration of this + sampler will yield the same ordering. + Args: + epoch (int): Epoch number. + """ + self.epoch = epoch \ No newline at end of file diff --git a/nemo/collections/tts/torch/g2ps.py b/nemo/collections/tts/torch/g2ps.py index c5bf9523c251..554f214975ba 100644 --- a/nemo/collections/tts/torch/g2ps.py +++ b/nemo/collections/tts/torch/g2ps.py @@ -12,287 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import abc -import pathlib -import random -import re -import time -from typing import Optional - -import nltk -import torch -import phonemizer -from phonemizer import phonemize - -from nemo.collections.tts.torch.en_utils import english_word_tokenize -from nemo.utils import logging -from nemo.utils.get_rank import is_global_rank_zero - -global_phonemizer = phonemizer.backend.EspeakBackend(language='en-us', preserve_punctuation=True, with_stress=True) - -_alt_re = re.compile(r'\([0-9]+\)') -_whitespace_re = re.compile(r'\s+') - -class BaseG2p(abc.ABC): - def __init__( - self, phoneme_dict=None, word_tokenize_func=lambda x: x, apply_to_oov_word=None, - ): - """Abstract class for creating an arbitrary module to convert grapheme words to phoneme sequences (or leave unchanged or use apply_to_oov_word). - Args: - phoneme_dict: Arbitrary representation of dictionary (phoneme -> grapheme) for known words. - word_tokenize_func: Function for tokenizing text to words. - apply_to_oov_word: Function that will be applied to out of phoneme_dict word. - """ - self.phoneme_dict = phoneme_dict - self.word_tokenize_func = word_tokenize_func - self.apply_to_oov_word = apply_to_oov_word - - @abc.abstractmethod - def __call__(self, text: str) -> str: - pass - - -class EnglishG2p(BaseG2p): - def __init__( - self, - phoneme_dict=None, - word_tokenize_func=english_word_tokenize, - apply_to_oov_word=None, - ignore_ambiguous_words=True, - heteronyms=None, - encoding='latin-1', - phoneme_probability: Optional[float] = None, - ): - """English G2P module. This module converts words from grapheme to phoneme representation using phoneme_dict in CMU dict format. - Optionally, it can ignore words which are heteronyms, ambiguous or marked as unchangeable by word_tokenize_func (see code for details). - Ignored words are left unchanged or passed through apply_to_oov_word. - Args: - phoneme_dict (str, Path, Dict): Path to file in CMU dict format or dictionary in CMU dict. - word_tokenize_func: Function for tokenizing text to words. - It has to return List[Tuple[Union[str, List[str]], bool]] where every tuple denotes word representation and flag whether to leave unchanged or not. - It is expected that unchangeable word representation will be represented as List[str], other cases are represented as str. - It is useful to mark word as unchangeable which is already in phoneme representation. 
- apply_to_oov_word: Function that will be applied to out of phoneme_dict word. - ignore_ambiguous_words: Whether to not handle word via phoneme_dict with ambiguous phoneme sequences. Defaults to True. - heteronyms (str, Path, List): Path to file with heteronyms (every line is new word) or list of words. - encoding: Encoding type. - phoneme_probability (Optional[float]): The probability (0. self.phoneme_probability: - return word, True - - # punctuation - if re.search("[a-zA-Z]", word) is None: - return list(word), True - - # heteronym - if self.heteronyms is not None and word in self.heteronyms: - return word, True - - # `'s` suffix - if ( - len(word) > 2 - and word.endswith("'s") - and (word not in self.phoneme_dict) - and (word[:-2] in self.phoneme_dict) - and (not self.ignore_ambiguous_words or self.is_unique_in_phoneme_dict(word[:-2])) - ): - return self.phoneme_dict[word[:-2]][0] + ["Z"], True - - # `s` suffix - if ( - len(word) > 1 - and word.endswith("s") - and (word not in self.phoneme_dict) - and (word[:-1] in self.phoneme_dict) - and (not self.ignore_ambiguous_words or self.is_unique_in_phoneme_dict(word[:-1])) - ): - return self.phoneme_dict[word[:-1]][0] + ["Z"], True - - # phoneme dict - if word in self.phoneme_dict and (not self.ignore_ambiguous_words or self.is_unique_in_phoneme_dict(word)): - return self.phoneme_dict[word][0], True - - if self.apply_to_oov_word is not None: - return self.apply_to_oov_word(word), False - else: - return word, False - - def __call__(self, text): - words = self.word_tokenize_func(text) - - prons = [] - for word, without_changes in words: - if without_changes: - prons.extend(word) - continue - - word_by_hyphen = word.split("-") - - pron, is_handled = self.parse_one_word(word) - - if not is_handled and len(word_by_hyphen) > 1: - pron = [] - for sub_word in word_by_hyphen: - p, _ = self.parse_one_word(sub_word) - pron.extend(p) - pron.extend(["-"]) - pron.pop() - - prons.extend(pron) - - return prons - - -class IPAG2p(BaseG2p): - def __init__( - self, - strip=True, - njobs=1 - ): - """IPA G2P module. This module converts words from grapheme to phoneme representation using phoneme_dict in CMU dict format. - Optionally, it can ignore words which are heteronyms, ambiguous or marked as unchangeable by word_tokenize_func (see code for details). - Ignored words are left unchanged or passed through apply_to_oov_word. - Args: - phoneme_dict (str, Path, Dict): Path to file in CMU dict format or dictionary in CMU dict. - word_tokenize_func: Function for tokenizing text to words. - It has to return List[Tuple[Union[str, List[str]], bool]] where every tuple denotes word representation and flag whether to leave unchanged or not. - It is expected that unchangeable word representation will be represented as List[str], other cases are represented as str. - It is useful to mark word as unchangeable which is already in phoneme representation. - apply_to_oov_word: Function that will be applied to out of phoneme_dict word. - ignore_ambiguous_words: Whether to not handle word via phoneme_dict with ambiguous phoneme sequences. Defaults to True. - heteronyms (str, Path, List): Path to file with heteronyms (every line is new word) or list of words. - encoding: Encoding type. - """ - self.strip = strip - self.njobs = njobs - - - @staticmethod - def _parse_file_by_lines(p, encoding): - with open(p, encoding=encoding) as f: - return [l.rstrip() for l in f.readlines()] - - - def parse_one_word(self, word: str): - """ - Returns parsed `word` and `status` as bool. 
- `status` will be `False` if word wasn't handled, `True` otherwise. - """ - - # punctuation - if re.search("[a-zA-Z]", word) is None: - return list(word), True - - word = global_phonemizer.phonemize([word], strip=self.strip, njobs=self.njobs) - word = re.sub(_whitespace_re, ' ', word[0]) - - return word, True - - def __call__(self, text): - g2p_text = global_phonemizer.phonemize([text], strip=self.strip, njobs=self.njobs) - g2p_text = re.sub(_whitespace_re, ' ', g2p_text[0]) - - return g2p_text # TODO (xueyang): deprecate this file since no other places import modules from here anymore. However, # all checkpoints uploaded in ngc used this path. So it requires to update all ngc checkpoints g2p path as well. from nemo_text_processing.g2p.modules import IPAG2P, BaseG2p, EnglishG2p From 58662111b031272b64bc13476f5a4c6150e1321e Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Tue, 8 Nov 2022 04:33:17 -0800 Subject: [PATCH 160/244] strict ptl version --- requirements/requirements_lightning.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/requirements_lightning.txt b/requirements/requirements_lightning.txt index 259bd1289dc7..a3d8aaa0ce45 100644 --- a/requirements/requirements_lightning.txt +++ b/requirements/requirements_lightning.txt @@ -1,4 +1,4 @@ -pytorch-lightning>=1.7.0,<=1.7.7 +pytorch-lightning==1.7.0#,<=1.7.7 torchmetrics>=0.4.1rc0 transformers>=4.0.1,<=4.21.2 webdataset>=0.1.48,<=0.1.62 From 93a73ec2c5216ad827b6660a06612fb5d2c43941 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Tue, 8 Nov 2022 04:39:18 -0800 Subject: [PATCH 161/244] strict ptl version --- requirements/requirements_lightning.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/requirements_lightning.txt b/requirements/requirements_lightning.txt index a3d8aaa0ce45..72c0e725ce4e 100644 --- a/requirements/requirements_lightning.txt +++ b/requirements/requirements_lightning.txt @@ -1,4 +1,4 @@ -pytorch-lightning==1.7.0#,<=1.7.7 +pytorch-lightning==1.7.0 torchmetrics>=0.4.1rc0 transformers>=4.0.1,<=4.21.2 webdataset>=0.1.48,<=0.1.62 From 67d231795d90faaa64d8ea41e563a620e86b8646 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Tue, 8 Nov 2022 04:48:43 -0800 Subject: [PATCH 162/244] reverted plt version --- requirements/requirements_lightning.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/requirements_lightning.txt b/requirements/requirements_lightning.txt index 72c0e725ce4e..180b58b1017b 100644 --- a/requirements/requirements_lightning.txt +++ b/requirements/requirements_lightning.txt @@ -1,4 +1,4 @@ -pytorch-lightning==1.7.0 +pytorch-lightning>=1.7.0, <=1.7.7 torchmetrics>=0.4.1rc0 transformers>=4.0.1,<=4.21.2 webdataset>=0.1.48,<=0.1.62 From e2adafbc39db6ad80b4e4f5e8c457fb8264819e0 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Tue, 8 Nov 2022 06:16:26 -0800 Subject: [PATCH 163/244] Added base text2audio class --- nemo/collections/tts/models/base.py | 35 +++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/nemo/collections/tts/models/base.py b/nemo/collections/tts/models/base.py index a7ce3b603eca..3872722d1de7 100644 --- a/nemo/collections/tts/models/base.py +++ b/nemo/collections/tts/models/base.py @@ -227,3 +227,38 @@ def list_available_models(cls) -> 'List[PretrainedModelInfo]': if subclass_models is not None and len(subclass_models) > 0: list_of_models.extend(subclass_models) return list_of_models + +class TextToWaveform(ModelPT, ABC): + """ Base class for all end-to-end 
TTS models that generate a waveform from text """
+
+    @abstractmethod
+    def parse(self, str_input: str, **kwargs) -> 'torch.tensor':
+        """
+        A helper function that accepts raw python strings and turns them into a tensor. The tensor should have 2
+        dimensions. The first is the batch, which should be of size 1. The second should represent time. The tensor
+        should represent either tokenized or embedded text, depending on the model.
+        """
+
+    @abstractmethod
+    def convert_text_to_waveform(self, *, tokens: 'torch.tensor', **kwargs) -> 'List[torch.tensor]':
+        """
+        Accepts a batch of text and returns a list containing a batch of audio
+        Args:
+            tokens: A torch tensor representing the text to be converted to speech
+        Returns:
+            audio: A list of length batch_size containing torch tensors representing the waveform output
+        """
+
+    @classmethod
+    def list_available_models(cls) -> 'List[PretrainedModelInfo]':
+        """
+        This method returns a list of pre-trained models which can be instantiated directly from NVIDIA's NGC cloud.
+        Returns:
+            List of available pre-trained models.
+        """
+        list_of_models = []
+        for subclass in cls.__subclasses__():
+            subclass_models = subclass.list_available_models()
+            if subclass_models is not None and len(subclass_models) > 0:
+                list_of_models.extend(subclass_models)
+        return list_of_models
\ No newline at end of file

From f67fe9566790e4ae5245d218af6f2b3687bed237 Mon Sep 17 00:00:00 2001
From: Somshubra Majumdar
Date: Tue, 8 Nov 2022 14:49:41 -0800
Subject: [PATCH 164/244] Fix issue with HF Model upload tutorial (#5359)

* Add Gradio App to ASR Docs (#5270)

Signed-off-by: smajumdar

Signed-off-by: smajumdar
(cherry picked from commit e4b6a387e3b3d9cdf511f7b9bbb5e94925e48cc2)

* Fix issue with normalized config for dataset name

Signed-off-by: smajumdar

Signed-off-by: smajumdar
---
 docs/source/asr/intro.rst                        | 12 ++++++++++++
 .../Publish_NeMo_Model_On_Hugging_Face_Hub.ipynb | 16 +++++++++++-----
 2 files changed, 23 insertions(+), 5 deletions(-)

diff --git a/docs/source/asr/intro.rst b/docs/source/asr/intro.rst
index 8e2e3ba699ec..e655da836a76 100644
--- a/docs/source/asr/intro.rst
+++ b/docs/source/asr/intro.rst
@@ -28,6 +28,18 @@ we could integrate a language model that would improve our predictions, as well
 And the entire end-to-end ASR model can be trained at once--a much easier pipeline to handle!
 
+A demo below allows evaluation of NeMo ASR models in multiple languages from the browser:
+
+.. raw:: html
+
+
+
+
+
+
 The full documentation tree is as follows:
 
 ..
toctree:: diff --git a/tutorials/Publish_NeMo_Model_On_Hugging_Face_Hub.ipynb b/tutorials/Publish_NeMo_Model_On_Hugging_Face_Hub.ipynb index 1ecb17e83b06..a13174033e0c 100644 --- a/tutorials/Publish_NeMo_Model_On_Hugging_Face_Hub.ipynb +++ b/tutorials/Publish_NeMo_Model_On_Hugging_Face_Hub.ipynb @@ -458,6 +458,7 @@ { "cell_type": "code", "source": [ + "# Replace all spaces with `-`\n", "DATASETS = [\n", " \"librispeech_asr\",\n", " \"mozilla-foundation/common_voice_7_0\",\n", @@ -466,11 +467,11 @@ " \"Switchboard-1\",\n", " \"WSJ-0\",\n", " \"WSJ-1\",\n", - " \"National Singapore Corpus Part 1\",\n", - " \"National Singapore Corpus Part 6\",\n", - " \"VoxPopuli (EN)\",\n", - " \"Europarl-ASR (EN)\",\n", - " \"Multilingual LibriSpeech (2000 hours)\",\n", + " \"National-Singapore-Corpus-Part-1\",\n", + " \"National-Singapore-Corpus-Part-6\",\n", + " \"VoxPopuli-(EN)\",\n", + " \"Europarl-ASR-(EN)\",\n", + " \"Multilingual-LibriSpeech-(2000-hours)\",\n", "]" ], "metadata": { @@ -520,9 +521,14 @@ "config = OmegaConf.structured(config)\n", "\n", "with open_dict(config):\n", + " # Update `model_index` to `model-index`\n", " model_index = config.pop('model_index')\n", " config['model-index'] = model_index\n", "\n", + " # Replace all spaces with `-` in datasets\n", + " normalized_datasets = [ds_name.replace(\" \", \"-\") for ds_name in config['datasets']]\n", + " config['datasets'] = OmegaConf.create(normalized_datasets)\n", + "\n", "print(OmegaConf.to_yaml(config))" ], "metadata": { From 4eb4351a16ca9e5d3b06f84f2c66ca318793ef9a Mon Sep 17 00:00:00 2001 From: Matvei Novikov Date: Wed, 9 Nov 2022 03:35:00 +0400 Subject: [PATCH 165/244] tutorial fixes (#5354) Signed-off-by: Matvei Novikov Signed-off-by: Matvei Novikov --- ...ctuation_and_Capitalization_Lexical_Audio.ipynb | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/tutorials/nlp/Punctuation_and_Capitalization_Lexical_Audio.ipynb b/tutorials/nlp/Punctuation_and_Capitalization_Lexical_Audio.ipynb index 4c20cae8af19..57d443ddf5df 100644 --- a/tutorials/nlp/Punctuation_and_Capitalization_Lexical_Audio.ipynb +++ b/tutorials/nlp/Punctuation_and_Capitalization_Lexical_Audio.ipynb @@ -936,20 +936,22 @@ "outputs": [], "source": [ "# let's reload our pretrained model\n", - "pretrained_model = nemo_nlp.models.PunctuationCapitalizationLexicalAudioModel.from_pretrained('Punctuation_And_Capitalization_Lexical_Audio/checkpoints/Punctuation_and_Capitalization_Lexical_Audio.nemo')\n", + "pretrained_model = nemo_nlp.models.PunctuationCapitalizationLexicalAudioModel.restore_from('Punctuation_And_Capitalization_Lexical_Audio/checkpoints/Punctuation_and_Capitalization_Lexical_Audio.nemo')\n", "\n", "# setup train and validation Pytorch DataLoaders\n", "pretrained_model.update_config_after_restoring_from_checkpoint(\n", " train_ds={\n", " 'ds_item': DATA_DIR,\n", - " 'text_file': 'text_train.txt',\n", - " 'labels_file': 'labels_train.txt',\n", + " 'text_file': 'text_dev.txt',\n", + " 'labels_file': 'labels_dev.txt',\n", + " 'audio_file': 'audio_dev.txt',\n", " 'tokens_in_batch': 1024,\n", " },\n", " validation_ds={\n", " 'ds_item': DATA_DIR,\n", " 'text_file': 'text_dev.txt',\n", " 'labels_file': 'labels_dev.txt',\n", + " 'audio_file': 'audio_dev.txt',\n", " 'tokens_in_batch': 1024,\n", " },\n", ")\n", @@ -960,8 +962,8 @@ "fast_dev_run = True\n", "trainer = pl.Trainer(devices=1, accelerator='gpu', fast_dev_run=fast_dev_run)\n", "pretrained_model.set_trainer(trainer)\n", - "pretrained_model.setup_training_data()\n", - 
"pretrained_model.setup_validation_data()\n", + "pretrained_model.setup_training_data(pretrained_model.cfg.train_ds)\n", + "pretrained_model.setup_validation_data(pretrained_model.cfg.validation_ds)\n", "trainer.fit(pretrained_model)" ] }, @@ -997,7 +999,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.13" + "version": "3.8.13" } }, "nbformat": 4, From 9c3f35836e2aea48a97ce4cce33483702d31c6b5 Mon Sep 17 00:00:00 2001 From: Elena Rastorgueva <80532067+erastorgueva-nv@users.noreply.github.com> Date: Wed, 9 Nov 2022 11:44:12 -0800 Subject: [PATCH 166/244] Add SDP documentation (#5274) * Add details to SDP README.md Signed-off-by: Elena Rastorgueva * Add docstring to WriteManifest processor Signed-off-by: Elena Rastorgueva * Add docstring to CreateInitialManifestMLS Signed-off-by: Elena Rastorgueva * Add ModifyManifestTextProcessor docstring Signed-off-by: Elena Rastorgueva * Add ASRInference docstring Signed-off-by: Elena Rastorgueva * Add base_processor docstrings Signed-off-by: Elena Rastorgueva * Add minimal SDP docs page Signed-off-by: Elena Rastorgueva * Update tools/speech_dataset_processor/README.md Co-authored-by: Igor Gitman Signed-off-by: Elena Rastorgueva <80532067+erastorgueva-nv@users.noreply.github.com> * Write simple README for SDP and move complex explanations to docs Signed-off-by: Elena Rastorgueva * Remove incorrect type hints Signed-off-by: Elena Rastorgueva * Make config example less confusing Signed-off-by: Elena Rastorgueva * Fix typo Signed-off-by: Elena Rastorgueva * Clarify that YAML file is config file in README Signed-off-by: Elena Rastorgueva * Remove unused imports Signed-off-by: Elena Rastorgueva * Remove SDP docs for now Signed-off-by: Elena Rastorgueva * Remove links to docs in SDP README Signed-off-by: Elena Rastorgueva Signed-off-by: Elena Rastorgueva Signed-off-by: Elena Rastorgueva <80532067+erastorgueva-nv@users.noreply.github.com> Co-authored-by: Igor Gitman --- tools/speech_dataset_processor/README.md | 68 ++++++++++++++++++- .../sdp/processors/asr_inference.py | 13 +++- .../sdp/processors/base_processor.py | 21 ++++-- .../create_initial_manifest_mls.py | 23 ++++++- .../modify_manifest/modify_manifest.py | 12 +++- .../sdp/processors/write_manifest.py | 13 +++- 6 files changed, 136 insertions(+), 14 deletions(-) diff --git a/tools/speech_dataset_processor/README.md b/tools/speech_dataset_processor/README.md index 992c1da656e5..31f22f5d81bf 100644 --- a/tools/speech_dataset_processor/README.md +++ b/tools/speech_dataset_processor/README.md @@ -1,7 +1,69 @@ # Speech Dataset Processor -Toolkit to make it easy to write and share the steps for processing a speech dataset. +Speech Dataset Processor (SDP) is a toolkit to make it easy to: +1. write code to process a new dataset, minimizing the amount of boilerplate code required. +2. share the steps for processing a speech dataset. Sharing processing steps can be as easy as sharing a YAML file. -This toolkit contains many of the most common speech dataset processing operations. To process a new dataset, you simply need to write a YAML file containing the parameters needed for dataset processing. It is also easy to add your own code for various speech dataset processing steps if needed. +SDP's philosophy is to represent processing operations as 'processor' classes. Many common processing operations are provided, and it is easy to add your own. 
In some cases, all you will need to do to process a new dataset is simply to write a YAML file containing the parameters needed to process your dataset.
-TBD
+SDP is specifically intended for the use case when you have an existing dataset with the audio & text pairs already specified in some form, and you wish to create a JSON manifest suitable for use with NeMo. SDP allows for intermediate cleaning and filtering steps which involve amending the 'ground truth' `"text"` or dropping utterances which are deemed to be too inaccurate for training on.
+
+## Quick intro to Speech Dataset Processor
+
+* The steps to process a dataset are specified by a YAML config file.
+* The YAML config file contains a list of processor classes & the args to pass into the constructor.
+* Each processor class inputs an existing manifest (except for classes which create an 'initial' manifest from some external transcript file) & outputs a modified version of the manifest. It may change other files in the process, e.g. resample audio.
+* To process a manifest, you need to list the chain of processors you wish to use.
+* If a processor is not included, you can make your own.
+
+## YAML config file layout
+A simplified version of an SDP config file can be:
+
+```yaml
+processors:
+
+  # use existing classes for popular datasets or make your own class
+  - _target_: sdp.processors.CreateInitialManifestMLS
+    output_manifest_file: ...
+    download_dir: ...
+    ...
+
+  # use existing classes for common operations or write your own
+  - _target_: sdp.processors.SubSubstringToSubstring
+
+    substring_pairs: {
+      # specify the parameters needed for your use case
+      " mr ": " mister ",
+      " misteak ": " mistake ",
+      ...
+    }
+
+  - _target_: sdp.processors.DropNonAlphabet
+    alphabet: " abcdefghijklmnopqrstuvwxyz"
+    output_manifest_file: ...
+    ...
+```
+## Existing processor classes
+In addition to those mentioned in the example config file, many more classes are already included in Speech Dataset Processor, for example:
+* `sdp.processors.ASRInference` will run inference on the manifest using a specified `pretrained_model`.
+* `sdp.processors.DropHighWER` will compute WER between `text` and `pred_text` of each utterance and remove the utterance if WER is greater than the specified `wer_threshold`.
+* `sdp.processors.DropHighLowCharrate` will compute the character rate in the utterance using `text` and `duration`, and drop the utterance if it is outside the bounds of the specified `high_charrate_threshold` and `low_charrate_threshold`. Carefully chosen thresholds will allow us to drop utterances with incorrect ground truth `text`.
+
+## Processor test cases
+You can add test cases to verify you have specified your desired changes correctly and to help document why you are making these changes.
+
+For example:
+```yaml
+processors:
+  ...
+  - _target_: sdp.processors.DropIfRegexInAttribute
+    attribute_to_regex:
+      "text" : ["(\\D ){5,20}"] # looks for between 4 and 19 characters surrounded by spaces
+
+    test_cases:
+      - {input: {text: "some s p a c e d out letters"}, output: null}
+      - {input: {text: "normal words only"}, output: {text: "normal words only"}}
+      - {input: {text: "three a b c spaced out letters"}, output: {text: "three a b c spaced out letters"}}
+      - {input: {text: "four a b c d spaced out letters"}, output: null}
+  ...
+```
\ No newline at end of file
diff --git a/tools/speech_dataset_processor/sdp/processors/asr_inference.py b/tools/speech_dataset_processor/sdp/processors/asr_inference.py
index 6ace462d7e39..98bf43b45b90 100644
--- a/tools/speech_dataset_processor/sdp/processors/asr_inference.py
+++ b/tools/speech_dataset_processor/sdp/processors/asr_inference.py
@@ -20,7 +20,14 @@
 
 class ASRInference(BaseProcessor):
-    """This processor perforce ASR inference.
+    """This processor performs ASR inference on the input manifest.
+
+    Args:
+        output_manifest_file: the path to the output manifest. It will be the same as the input manifest, but will
+            also have "pred_text" entries for every utterance.
+        input_manifest_file: the path to the input manifest which will be transcribed.
+        pretrained_model: the name of the pretrained NeMo ASR model which will be used to do inference.
+        batch_size: the batch size to use for ASR inference.
 
     Note that it does not re-use base parallel implementation, since the ASR
     inference is already run in batches.
@@ -29,7 +36,9 @@
     parallelization, but that needs to be tested.
     """
 
-    def __init__(self, output_manifest_file, input_manifest_file, pretrained_model, batch_size=32):
+    def __init__(
+        self, output_manifest_file: str, input_manifest_file: str, pretrained_model: str, batch_size: int = 32
+    ):
         self.output_manifest_file = output_manifest_file
         self.input_manifest_file = input_manifest_file
         self.script_path = Path(__file__).parents[4] / "examples" / "asr" / "transcribe_speech.py"
diff --git a/tools/speech_dataset_processor/sdp/processors/base_processor.py b/tools/speech_dataset_processor/sdp/processors/base_processor.py
index a51b3de1178b..2bbad5da6484 100644
--- a/tools/speech_dataset_processor/sdp/processors/base_processor.py
+++ b/tools/speech_dataset_processor/sdp/processors/base_processor.py
@@ -34,6 +34,17 @@ class DataEntry:
 
 class BaseProcessor(ABC):
+    """
+    Abstract class for SDP processors.
+
+    Args:
+        output_manifest_file: path of where the output manifest file will be located.
+        input_manifest_file: path of where the input manifest file is located. This arg
+            is optional - some processors may not take in an input manifest because they
+            need to create an initial manifest from scratch (ie from some transcript file
+            that is in a format different to the NeMo manifest format).
+    """
+
     def __init__(self, output_manifest_file, input_manifest_file=None):
         self.output_manifest_file = output_manifest_file
         self.input_manifest_file = input_manifest_file
@@ -55,13 +66,15 @@ def test(self):
 
 class BaseParallelProcessor(BaseProcessor):
     """
-    TBD
+    Processor class which allows operations on each utterance to be parallelized. Parallelization
+    is done using tqdm.contrib.concurrent.process_map.
 
-    input_manifest_file should always be specified unless it's the first
-    processor that reads from original dataset representation.
+    Args:
+        max_workers: maximum number of workers that will be spawned during parallel processing.
+        chunksize: the size of the chunks that will be sent to worker processes.
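As an illustration of the `process_map`-based parallelization described above, a minimal self-contained sketch (the manifest paths and the per-entry operation are hypothetical):

    import json
    from tqdm.contrib.concurrent import process_map

    def process_entry(entry):
        # Toy per-utterance operation: lowercase the transcript.
        entry["text"] = entry["text"].lower()
        return entry

    if __name__ == "__main__":
        with open("input_manifest.json") as f:           # assumed input path
            entries = [json.loads(line) for line in f]
        # max_workers / chunksize mirror the constructor arguments documented above.
        results = process_map(process_entry, entries, max_workers=4, chunksize=100)
        with open("output_manifest.json", "w") as f:     # assumed output path
            for entry in results:
                f.write(json.dumps(entry) + "\n")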
""" - def __init__(self, max_workers=-1, chunksize=100, **kwargs): + def __init__(self, max_workers: int = -1, chunksize: int = 100, **kwargs): super().__init__(**kwargs) if max_workers == -1: max_workers = multiprocessing.cpu_count() diff --git a/tools/speech_dataset_processor/sdp/processors/create_initial_manifest/create_initial_manifest_mls.py b/tools/speech_dataset_processor/sdp/processors/create_initial_manifest/create_initial_manifest_mls.py index 1ff9e914fe1b..97f224cb69de 100644 --- a/tools/speech_dataset_processor/sdp/processors/create_initial_manifest/create_initial_manifest_mls.py +++ b/tools/speech_dataset_processor/sdp/processors/create_initial_manifest/create_initial_manifest_mls.py @@ -25,8 +25,27 @@ class CreateInitialManifestMLS(BaseParallelProcessor): + """ + Downloads and unzips raw MLS data for the specified language, and creates an initial manifest using + the transcripts provided in the raw data. + + Args: + language: the language of the data you wish to be downloaded. This will be used to format the + URL from which we attempt to download the data. + download_dir: the directory where the downloaded data will be saved. + data_split: the data split for which the initial manifest will be created. + resampled_audio_dir: the directory where the resampled (16kHz) wav files will be stored. + use_test_data: if `True`, will use the test data manifest located at `TEST_DATA_PATH` to carry out tests. + """ + def __init__( - self, language, download_dir, resampled_audio_dir, data_split, use_test_data=False, **kwargs, + self, + language: str, + download_dir: str, + resampled_audio_dir: str, + data_split: str, + use_test_data: bool = False, + **kwargs, ): super().__init__(**kwargs) self.language = language @@ -65,7 +84,7 @@ def read_manifest(self): return dataset_entries - def process_dataset_entry(self, data_entry): + def process_dataset_entry(self, data_entry: str): if len(data_entry.split("\t")) != 2: raise RuntimeError(f"have more than one tab in line {data_entry}") diff --git a/tools/speech_dataset_processor/sdp/processors/modify_manifest/modify_manifest.py b/tools/speech_dataset_processor/sdp/processors/modify_manifest/modify_manifest.py index 5c1c0d808848..5c8ceefebe8e 100644 --- a/tools/speech_dataset_processor/sdp/processors/modify_manifest/modify_manifest.py +++ b/tools/speech_dataset_processor/sdp/processors/modify_manifest/modify_manifest.py @@ -23,12 +23,20 @@ class ModifyManifestTextProcessor(BaseParallelProcessor): """Base class useful for most "text-only" modifications of the manifest. - Will add the following functionality: - - Add space in the beginning and end of sentence for easier regex-based + This adds the following functionality on top of BaseParallelProcessor + - Adds space in the beginning and end of sentence for easier regex-based processing. - Automatically handles common test cases by comparing input to output values. + Args: + test_cases: an optional list of dicts containing test cases for checking + that the processor makes the changes that we are expecting. + The dicts must have a key 'input', the value of which is a dictionary + containing data which is our test input manifest line, and a key + 'output', the value of which is a dictionary containing data which is + the expected output manifest line. + .. note:: This class only supports one-to-one or one-to-none mappings. 
""" diff --git a/tools/speech_dataset_processor/sdp/processors/write_manifest.py b/tools/speech_dataset_processor/sdp/processors/write_manifest.py index 1f2d3ef12f2b..f601985a1647 100644 --- a/tools/speech_dataset_processor/sdp/processors/write_manifest.py +++ b/tools/speech_dataset_processor/sdp/processors/write_manifest.py @@ -13,13 +13,24 @@ # limitations under the License. import json +from typing import List from sdp.processors.base_processor import BaseProcessor from tqdm import tqdm class WriteManifest(BaseProcessor): - def __init__(self, output_manifest_file, input_manifest_file, fields_to_save): + """ + Saves a copy of a manifest but only with the fields specified in fields_to_save. + + Args: + output_manifest_file: path of where the output file will be saved. + input_manifest_file: path of where the input file that we will be copying is saved. + fields_to_save: list of the fields in the input manifest that we want to copy over. + The output file will only contain these fields. + """ + + def __init__(self, output_manifest_file: str, input_manifest_file: str, fields_to_save: List[str]): self.output_manifest_file = output_manifest_file self.input_manifest_file = input_manifest_file self.fields_to_save = fields_to_save From 6d9a8d20c1ea0183fc9cb441e717497d70dca421 Mon Sep 17 00:00:00 2001 From: Taejin Park Date: Wed, 9 Nov 2022 13:20:33 -0800 Subject: [PATCH 167/244] [Bugfix] Added rm -f / wget- nc command in multispeaker sim notebook to r1.13.0 (#5375) * Fix minor error in notebook Signed-off-by: Taejin Park * changed branch name in tutorial notebook Signed-off-by: Taejin Park Signed-off-by: Taejin Park --- tutorials/tools/Multispeaker_Simulator.ipynb | 21 ++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/tutorials/tools/Multispeaker_Simulator.ipynb b/tutorials/tools/Multispeaker_Simulator.ipynb index e8d8d1b09f89..e9822fd0ea9e 100644 --- a/tutorials/tools/Multispeaker_Simulator.ipynb +++ b/tutorials/tools/Multispeaker_Simulator.ipynb @@ -59,7 +59,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -76,7 +76,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -90,18 +90,18 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The LibriSpeech forced word alignments are from [this repository.](https://github.com/CorentinJ/librispeech-alignments). You can access to the whole LibriSpeech splits at this google drive link [link](https://drive.google.com/file/d/1WYfgr31T-PPwMcxuAq09XZfHQO5Mw8fE/view?usp=sharing). We will download the dev-clean part for demo purpose." + "The LibriSpeech forced word alignments are from [this repository](https://github.com/CorentinJ/librispeech-alignments). You can access to the whole LibriSpeech splits at this google drive [link](https://drive.google.com/file/d/1WYfgr31T-PPwMcxuAq09XZfHQO5Mw8fE/view?usp=sharing). We will download the dev-clean part for demo purpose." 
] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "!wget https://dldata-public.s3.us-east-2.amazonaws.com/LibriSpeech_Alignments.tar.gz\n", - "!tar -xvzf LibriSpeech_Alignments.tar.gz\n", - "!rm LibriSpeech-Alignments.zip" + "!wget -nc https://dldata-public.s3.us-east-2.amazonaws.com/LibriSpeech_Alignments.tar.gz\n", + "!tar -xzf LibriSpeech_Alignments.tar.gz\n", + "!rm -f LibriSpeech_Alignments.tar.gz" ] }, { @@ -117,7 +117,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -144,8 +144,9 @@ "metadata": {}, "outputs": [], "source": [ - "!wget https://www.openslr.org/resources/28/rirs_noises.zip\n", - "!unzip -o rirs_noises.zip" + "!wget -nc https://www.openslr.org/resources/28/rirs_noises.zip\n", + "!unzip -o rirs_noises.zip\n", + "!rm -f rirs_noises.zip" ] }, { From 5d97264524ef381894ee4bc57f71e9c910ee3dff Mon Sep 17 00:00:00 2001 From: Elena Rastorgueva <80532067+erastorgueva-nv@users.noreply.github.com> Date: Wed, 9 Nov 2022 15:35:35 -0800 Subject: [PATCH 168/244] Rename Speech Dataset Processor to Speech Data Processor (#5378) Signed-off-by: Elena Rastorgueva Signed-off-by: Elena Rastorgueva --- Jenkinsfile | 6 +++--- .../README.md | 8 ++++---- .../__init__.py | 0 .../dataset_configs/spanish/mls/config_mls_es.yaml | 0 .../spanish/mls/unique_processors/clean_roman_numerals.py | 0 .../main.py | 0 .../requirements.txt | 0 .../sdp/__init__.py | 0 .../sdp/processors/__init__.py | 0 .../sdp/processors/asr_inference.py | 0 .../sdp/processors/base_processor.py | 0 .../sdp/processors/create_initial_manifest/__init__.py | 0 .../create_initial_manifest_mls.py | 0 .../sdp/processors/modify_manifest/__init__.py | 0 .../sdp/processors/modify_manifest/data_to_data.py | 0 .../sdp/processors/modify_manifest/data_to_dropbool.py | 0 .../sdp/processors/modify_manifest/modify_manifest.py | 0 .../sdp/processors/write_manifest.py | 0 .../sdp/run_processors.py | 0 .../sdp/utils/__init__.py | 0 .../sdp/utils/common.py | 0 .../sdp/utils/edit_spaces.py | 0 .../sdp/utils/get_diff.py | 0 .../sdp/utils/metrics_computation.py | 0 .../tests/__init__.py | 0 .../tests/prepare_test_data/prepare_mls_data.py | 0 .../tests/test_all_cfgs.py | 0 .../tests/test_data_to_data.py | 0 .../tests/test_data_to_dropbool.py | 0 .../tests/test_utils.py | 0 30 files changed, 7 insertions(+), 7 deletions(-) rename tools/{speech_dataset_processor => speech_data_processor}/README.md (94%) rename tools/{speech_dataset_processor => speech_data_processor}/__init__.py (100%) rename tools/{speech_dataset_processor => speech_data_processor}/dataset_configs/spanish/mls/config_mls_es.yaml (100%) rename tools/{speech_dataset_processor => speech_data_processor}/dataset_configs/spanish/mls/unique_processors/clean_roman_numerals.py (100%) rename tools/{speech_dataset_processor => speech_data_processor}/main.py (100%) rename tools/{speech_dataset_processor => speech_data_processor}/requirements.txt (100%) rename tools/{speech_dataset_processor => speech_data_processor}/sdp/__init__.py (100%) rename tools/{speech_dataset_processor => speech_data_processor}/sdp/processors/__init__.py (100%) rename tools/{speech_dataset_processor => speech_data_processor}/sdp/processors/asr_inference.py (100%) rename tools/{speech_dataset_processor => speech_data_processor}/sdp/processors/base_processor.py (100%) rename tools/{speech_dataset_processor => 
speech_data_processor}/sdp/processors/create_initial_manifest/__init__.py (100%) rename tools/{speech_dataset_processor => speech_data_processor}/sdp/processors/create_initial_manifest/create_initial_manifest_mls.py (100%) rename tools/{speech_dataset_processor => speech_data_processor}/sdp/processors/modify_manifest/__init__.py (100%) rename tools/{speech_dataset_processor => speech_data_processor}/sdp/processors/modify_manifest/data_to_data.py (100%) rename tools/{speech_dataset_processor => speech_data_processor}/sdp/processors/modify_manifest/data_to_dropbool.py (100%) rename tools/{speech_dataset_processor => speech_data_processor}/sdp/processors/modify_manifest/modify_manifest.py (100%) rename tools/{speech_dataset_processor => speech_data_processor}/sdp/processors/write_manifest.py (100%) rename tools/{speech_dataset_processor => speech_data_processor}/sdp/run_processors.py (100%) rename tools/{speech_dataset_processor => speech_data_processor}/sdp/utils/__init__.py (100%) rename tools/{speech_dataset_processor => speech_data_processor}/sdp/utils/common.py (100%) rename tools/{speech_dataset_processor => speech_data_processor}/sdp/utils/edit_spaces.py (100%) rename tools/{speech_dataset_processor => speech_data_processor}/sdp/utils/get_diff.py (100%) rename tools/{speech_dataset_processor => speech_data_processor}/sdp/utils/metrics_computation.py (100%) rename tools/{speech_dataset_processor => speech_data_processor}/tests/__init__.py (100%) rename tools/{speech_dataset_processor => speech_data_processor}/tests/prepare_test_data/prepare_mls_data.py (100%) rename tools/{speech_dataset_processor => speech_data_processor}/tests/test_all_cfgs.py (100%) rename tools/{speech_dataset_processor => speech_data_processor}/tests/test_data_to_data.py (100%) rename tools/{speech_dataset_processor => speech_data_processor}/tests/test_data_to_dropbool.py (100%) rename tools/{speech_dataset_processor => speech_data_processor}/tests/test_utils.py (100%) diff --git a/Jenkinsfile b/Jenkinsfile index 31364fe3ea4b..d0bba5e46472 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -121,7 +121,7 @@ pipeline { } } - stage('L0: Unit Tests Speech Dataset Processor') { + stage('L0: Unit Tests Speech Data Processor') { when { anyOf { branch 'r1.13.0' @@ -129,8 +129,8 @@ pipeline { } } steps { - sh 'pip install -r tools/speech_dataset_processor/requirements.txt' - sh 'cd tools/speech_dataset_processor && CUDA_VISIBLE_DEVICES="" pytest tests -m "not pleasefixme"' + sh 'pip install -r tools/speech_data_processor/requirements.txt' + sh 'cd tools/speech_data_processor && CUDA_VISIBLE_DEVICES="" pytest tests -m "not pleasefixme"' } } diff --git a/tools/speech_dataset_processor/README.md b/tools/speech_data_processor/README.md similarity index 94% rename from tools/speech_dataset_processor/README.md rename to tools/speech_data_processor/README.md index 31f22f5d81bf..58547c2e8317 100644 --- a/tools/speech_dataset_processor/README.md +++ b/tools/speech_data_processor/README.md @@ -1,6 +1,6 @@ -# Speech Dataset Processor +# Speech Data Processor -Speech Dataset Processor (SDP) is a toolkit to make it easy to: +Speech Data Processor (SDP) is a toolkit to make it easy to: 1. write code to process a new dataset, minimizing the amount of boilerplate code required. 2. share the steps for processing a speech dataset. Sharing processing steps can be as easy as sharing a YAML file. @@ -8,7 +8,7 @@ SDP's philosophy is to represent processing operations as 'processor' classes. 
M SDP is specifically intended for the use case when you have an existing dataset with the audio & text pairs already specified in some form, and you wish to create a JSON manifest suitable for use with NeMo. SDP allows for intermediate cleaning and filtering steps which involve amending the 'ground truth' `"text"` or dropping utterances which are deemed to be too inaccurate for training on. -## Quick intro to Speech Dataset Processor +## Quick intro to Speech Data Processor * The steps to process a dataset are specified by a YAML config file. * The YAML config file contains a list of processor classes & the args to pass into the constructor. @@ -44,7 +44,7 @@ processors: ... ``` ## Existing processor classes -In addition to those mentioned in the example config file, many more classes are already included in Speech Dataset Processor, for example: +In addition to those mentioned in the example config file, many more classes are already included in Speech Data Processor, for example: * `sdp.processors.ASRInference` will run inference on the manifest using a specified `pretrained_model`. * `sdp.processors.DropHighWER` will compute WER between `text` and `pred_text` of each utterance and remove the utterance if WER is greater than the specified `wer_threshold`. * `sdp.processors.DropHighLowCharrate` will compute the character rate in the utterance using `text` and `duration`, and drop the utterance if it is outside the bounds of the specified `high_charrate_threshold` and `low_charrate_threshold`. Carefully chosen thresholds will allow us to drop utterances with incorrect ground truth `text`. diff --git a/tools/speech_dataset_processor/__init__.py b/tools/speech_data_processor/__init__.py similarity index 100% rename from tools/speech_dataset_processor/__init__.py rename to tools/speech_data_processor/__init__.py diff --git a/tools/speech_dataset_processor/dataset_configs/spanish/mls/config_mls_es.yaml b/tools/speech_data_processor/dataset_configs/spanish/mls/config_mls_es.yaml similarity index 100% rename from tools/speech_dataset_processor/dataset_configs/spanish/mls/config_mls_es.yaml rename to tools/speech_data_processor/dataset_configs/spanish/mls/config_mls_es.yaml diff --git a/tools/speech_dataset_processor/dataset_configs/spanish/mls/unique_processors/clean_roman_numerals.py b/tools/speech_data_processor/dataset_configs/spanish/mls/unique_processors/clean_roman_numerals.py similarity index 100% rename from tools/speech_dataset_processor/dataset_configs/spanish/mls/unique_processors/clean_roman_numerals.py rename to tools/speech_data_processor/dataset_configs/spanish/mls/unique_processors/clean_roman_numerals.py diff --git a/tools/speech_dataset_processor/main.py b/tools/speech_data_processor/main.py similarity index 100% rename from tools/speech_dataset_processor/main.py rename to tools/speech_data_processor/main.py diff --git a/tools/speech_dataset_processor/requirements.txt b/tools/speech_data_processor/requirements.txt similarity index 100% rename from tools/speech_dataset_processor/requirements.txt rename to tools/speech_data_processor/requirements.txt diff --git a/tools/speech_dataset_processor/sdp/__init__.py b/tools/speech_data_processor/sdp/__init__.py similarity index 100% rename from tools/speech_dataset_processor/sdp/__init__.py rename to tools/speech_data_processor/sdp/__init__.py diff --git a/tools/speech_dataset_processor/sdp/processors/__init__.py b/tools/speech_data_processor/sdp/processors/__init__.py similarity index 100% rename from 
tools/speech_dataset_processor/sdp/processors/__init__.py rename to tools/speech_data_processor/sdp/processors/__init__.py diff --git a/tools/speech_dataset_processor/sdp/processors/asr_inference.py b/tools/speech_data_processor/sdp/processors/asr_inference.py similarity index 100% rename from tools/speech_dataset_processor/sdp/processors/asr_inference.py rename to tools/speech_data_processor/sdp/processors/asr_inference.py diff --git a/tools/speech_dataset_processor/sdp/processors/base_processor.py b/tools/speech_data_processor/sdp/processors/base_processor.py similarity index 100% rename from tools/speech_dataset_processor/sdp/processors/base_processor.py rename to tools/speech_data_processor/sdp/processors/base_processor.py diff --git a/tools/speech_dataset_processor/sdp/processors/create_initial_manifest/__init__.py b/tools/speech_data_processor/sdp/processors/create_initial_manifest/__init__.py similarity index 100% rename from tools/speech_dataset_processor/sdp/processors/create_initial_manifest/__init__.py rename to tools/speech_data_processor/sdp/processors/create_initial_manifest/__init__.py diff --git a/tools/speech_dataset_processor/sdp/processors/create_initial_manifest/create_initial_manifest_mls.py b/tools/speech_data_processor/sdp/processors/create_initial_manifest/create_initial_manifest_mls.py similarity index 100% rename from tools/speech_dataset_processor/sdp/processors/create_initial_manifest/create_initial_manifest_mls.py rename to tools/speech_data_processor/sdp/processors/create_initial_manifest/create_initial_manifest_mls.py diff --git a/tools/speech_dataset_processor/sdp/processors/modify_manifest/__init__.py b/tools/speech_data_processor/sdp/processors/modify_manifest/__init__.py similarity index 100% rename from tools/speech_dataset_processor/sdp/processors/modify_manifest/__init__.py rename to tools/speech_data_processor/sdp/processors/modify_manifest/__init__.py diff --git a/tools/speech_dataset_processor/sdp/processors/modify_manifest/data_to_data.py b/tools/speech_data_processor/sdp/processors/modify_manifest/data_to_data.py similarity index 100% rename from tools/speech_dataset_processor/sdp/processors/modify_manifest/data_to_data.py rename to tools/speech_data_processor/sdp/processors/modify_manifest/data_to_data.py diff --git a/tools/speech_dataset_processor/sdp/processors/modify_manifest/data_to_dropbool.py b/tools/speech_data_processor/sdp/processors/modify_manifest/data_to_dropbool.py similarity index 100% rename from tools/speech_dataset_processor/sdp/processors/modify_manifest/data_to_dropbool.py rename to tools/speech_data_processor/sdp/processors/modify_manifest/data_to_dropbool.py diff --git a/tools/speech_dataset_processor/sdp/processors/modify_manifest/modify_manifest.py b/tools/speech_data_processor/sdp/processors/modify_manifest/modify_manifest.py similarity index 100% rename from tools/speech_dataset_processor/sdp/processors/modify_manifest/modify_manifest.py rename to tools/speech_data_processor/sdp/processors/modify_manifest/modify_manifest.py diff --git a/tools/speech_dataset_processor/sdp/processors/write_manifest.py b/tools/speech_data_processor/sdp/processors/write_manifest.py similarity index 100% rename from tools/speech_dataset_processor/sdp/processors/write_manifest.py rename to tools/speech_data_processor/sdp/processors/write_manifest.py diff --git a/tools/speech_dataset_processor/sdp/run_processors.py b/tools/speech_data_processor/sdp/run_processors.py similarity index 100% rename from 
tools/speech_dataset_processor/sdp/run_processors.py rename to tools/speech_data_processor/sdp/run_processors.py diff --git a/tools/speech_dataset_processor/sdp/utils/__init__.py b/tools/speech_data_processor/sdp/utils/__init__.py similarity index 100% rename from tools/speech_dataset_processor/sdp/utils/__init__.py rename to tools/speech_data_processor/sdp/utils/__init__.py diff --git a/tools/speech_dataset_processor/sdp/utils/common.py b/tools/speech_data_processor/sdp/utils/common.py similarity index 100% rename from tools/speech_dataset_processor/sdp/utils/common.py rename to tools/speech_data_processor/sdp/utils/common.py diff --git a/tools/speech_dataset_processor/sdp/utils/edit_spaces.py b/tools/speech_data_processor/sdp/utils/edit_spaces.py similarity index 100% rename from tools/speech_dataset_processor/sdp/utils/edit_spaces.py rename to tools/speech_data_processor/sdp/utils/edit_spaces.py diff --git a/tools/speech_dataset_processor/sdp/utils/get_diff.py b/tools/speech_data_processor/sdp/utils/get_diff.py similarity index 100% rename from tools/speech_dataset_processor/sdp/utils/get_diff.py rename to tools/speech_data_processor/sdp/utils/get_diff.py diff --git a/tools/speech_dataset_processor/sdp/utils/metrics_computation.py b/tools/speech_data_processor/sdp/utils/metrics_computation.py similarity index 100% rename from tools/speech_dataset_processor/sdp/utils/metrics_computation.py rename to tools/speech_data_processor/sdp/utils/metrics_computation.py diff --git a/tools/speech_dataset_processor/tests/__init__.py b/tools/speech_data_processor/tests/__init__.py similarity index 100% rename from tools/speech_dataset_processor/tests/__init__.py rename to tools/speech_data_processor/tests/__init__.py diff --git a/tools/speech_dataset_processor/tests/prepare_test_data/prepare_mls_data.py b/tools/speech_data_processor/tests/prepare_test_data/prepare_mls_data.py similarity index 100% rename from tools/speech_dataset_processor/tests/prepare_test_data/prepare_mls_data.py rename to tools/speech_data_processor/tests/prepare_test_data/prepare_mls_data.py diff --git a/tools/speech_dataset_processor/tests/test_all_cfgs.py b/tools/speech_data_processor/tests/test_all_cfgs.py similarity index 100% rename from tools/speech_dataset_processor/tests/test_all_cfgs.py rename to tools/speech_data_processor/tests/test_all_cfgs.py diff --git a/tools/speech_dataset_processor/tests/test_data_to_data.py b/tools/speech_data_processor/tests/test_data_to_data.py similarity index 100% rename from tools/speech_dataset_processor/tests/test_data_to_data.py rename to tools/speech_data_processor/tests/test_data_to_data.py diff --git a/tools/speech_dataset_processor/tests/test_data_to_dropbool.py b/tools/speech_data_processor/tests/test_data_to_dropbool.py similarity index 100% rename from tools/speech_dataset_processor/tests/test_data_to_dropbool.py rename to tools/speech_data_processor/tests/test_data_to_dropbool.py diff --git a/tools/speech_dataset_processor/tests/test_utils.py b/tools/speech_data_processor/tests/test_utils.py similarity index 100% rename from tools/speech_dataset_processor/tests/test_utils.py rename to tools/speech_data_processor/tests/test_utils.py From e57b3eca50b3b6bab1915872437fe47bf2697445 Mon Sep 17 00:00:00 2001 From: Adi Renduchintala <108822655+arendu@users.noreply.github.com> Date: Wed, 9 Nov 2022 19:03:19 -0800 Subject: [PATCH 169/244] fix for num worker 0 causing issues in losses after 1 epoch (#5379) --- .../megatron/gpt_prompt_learning_dataset.py | 9 ++++++--- 
.../megatron/t5_prompt_learning_dataset.py | 1 - 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/nemo/collections/nlp/data/language_modeling/megatron/gpt_prompt_learning_dataset.py b/nemo/collections/nlp/data/language_modeling/megatron/gpt_prompt_learning_dataset.py index 3d0c29673c83..69cd485b0ca5 100755 --- a/nemo/collections/nlp/data/language_modeling/megatron/gpt_prompt_learning_dataset.py +++ b/nemo/collections/nlp/data/language_modeling/megatron/gpt_prompt_learning_dataset.py @@ -364,6 +364,7 @@ def collate_fn(self, batch, tp_workers=0): def pad_batch_and_build_loss_mask(self, input_ids, batch_max, answer_starts): """ Pad input_ids in batch to max batch length while building loss mask """ batch_loss_masks = [] + padded_input_ids = [] for ids, answer_start_idx in zip(input_ids, answer_starts): if answer_start_idx is not None: # Loss mask where answer tokens are 1.0 and all other tokens are 0.0 @@ -375,17 +376,19 @@ def pad_batch_and_build_loss_mask(self, input_ids, batch_max, answer_starts): # Pad to max length input_length = len(ids) padding_length = batch_max - input_length - ids.extend([self.pad_token_id] * padding_length) + pad_extend = [self.pad_token_id] * padding_length + ids = ids + pad_extend + padded_input_ids.append(ids) # Account for padding in loss mask loss_mask.extend([0.0] * padding_length) batch_loss_masks.append(torch.tensor(loss_mask, dtype=torch.float)) # Make into torch tensors - input_ids = torch.tensor(input_ids, dtype=torch.long) + padded_input_ids = torch.tensor(padded_input_ids, dtype=torch.long) batch_loss_masks = torch.stack(batch_loss_masks) - return input_ids, batch_loss_masks + return padded_input_ids, batch_loss_masks def inference_collate_fn(self, batch): """ diff --git a/nemo/collections/nlp/data/language_modeling/megatron/t5_prompt_learning_dataset.py b/nemo/collections/nlp/data/language_modeling/megatron/t5_prompt_learning_dataset.py index 0f39cd8e05c9..2858d9d183df 100644 --- a/nemo/collections/nlp/data/language_modeling/megatron/t5_prompt_learning_dataset.py +++ b/nemo/collections/nlp/data/language_modeling/megatron/t5_prompt_learning_dataset.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
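The bug the hunk above fixes is easy to miss: with num_workers=0 the dataset's cached example lists survive across epochs, so padding them in place with `extend` corrupts the cache and skews the loss after the first epoch. A minimal illustration of why the switch from `ids.extend(...)` to `ids + pad_extend` matters (values invented):

```python
# Illustrative repro of the in-place padding bug fixed above.
cached_ids = [1, 2, 3]  # stands in for an example list cached inside the dataset


def pad_inplace(ids, batch_max, pad_id=0):
    ids.extend([pad_id] * (batch_max - len(ids)))  # mutates the cached list
    return ids


def pad_pure(ids, batch_max, pad_id=0):
    return ids + [pad_id] * (batch_max - len(ids))  # leaves the cache untouched


pad_inplace(cached_ids, 5)
assert cached_ids == [1, 2, 3, 0, 0]  # padding leaked into the next epoch's data
assert pad_pure([1, 2, 3], 5) == [1, 2, 3, 0, 0]  # same result, cache intact
```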
-import enum import json import torch From f8f31a13de40ab7630077a6d39aee0de7d2c2276 Mon Sep 17 00:00:00 2001 From: Virginia Adams <78445382+vadam5@users.noreply.github.com> Date: Thu, 10 Nov 2022 15:43:31 -0800 Subject: [PATCH 170/244] Fixed bug in notebook (#5382) Signed-off-by: Virginia Adams Signed-off-by: Virginia Adams --- .../language_modeling/megatron_gpt_prompt_learning_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nemo/collections/nlp/models/language_modeling/megatron_gpt_prompt_learning_model.py b/nemo/collections/nlp/models/language_modeling/megatron_gpt_prompt_learning_model.py index add7c898c80c..387ff52bb078 100644 --- a/nemo/collections/nlp/models/language_modeling/megatron_gpt_prompt_learning_model.py +++ b/nemo/collections/nlp/models/language_modeling/megatron_gpt_prompt_learning_model.py @@ -986,7 +986,7 @@ def dummy(): # Call same generate code as in MegatronGPT return megatron_gpt_generate( - self.cuda(), processed_inputs, self.tokenizer, length_params, sampling_params, task_ids + self.cuda(), processed_inputs, self.tokenizer, length_params, sampling_params, task_ids=task_ids ) def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: Optional[int] = None) -> Any: From 4c9c8585a8ade42d52e09022e14ceaa1df13ffae Mon Sep 17 00:00:00 2001 From: Somshubra Majumdar Date: Thu, 10 Nov 2022 15:51:55 -0800 Subject: [PATCH 171/244] Force MHA QKV onto fp32 (#5391) Signed-off-by: smajumdar Signed-off-by: smajumdar --- .../asr/parts/submodules/multi_head_attention.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/nemo/collections/asr/parts/submodules/multi_head_attention.py b/nemo/collections/asr/parts/submodules/multi_head_attention.py index 78cf1ce37212..8f774e172718 100644 --- a/nemo/collections/asr/parts/submodules/multi_head_attention.py +++ b/nemo/collections/asr/parts/submodules/multi_head_attention.py @@ -140,6 +140,9 @@ def forward(self, query, key, value, mask, pos_emb=None, cache=None, cache_next= """ key, value, query = self.update_cache(key=key, value=value, query=query, cache=cache, cache_next=cache_next) + if torch.is_autocast_enabled(): + query, key, value = query.to(torch.float32), key.to(torch.float32), value.to(torch.float32) + # temporary until we solve this more gracefully with avoid_float16_autocast_context(): q, k, v = self.forward_qkv(query, key, value) @@ -217,6 +220,9 @@ def forward(self, query, key, value, mask, pos_emb, cache=None, cache_next=None) """ key, value, query = self.update_cache(key=key, value=value, query=query, cache=cache, cache_next=cache_next) + if torch.is_autocast_enabled(): + query, key, value = query.to(torch.float32), key.to(torch.float32), value.to(torch.float32) + # temporary until we solve this more gracefully with avoid_float16_autocast_context(): q, k, v = self.forward_qkv(query, key, value) From f789334b9deaaad49e4c80bb3a978af40685680e Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Mon, 14 Nov 2022 08:56:01 -0800 Subject: [PATCH 172/244] Added scheduling variety --- examples/tts/conf/vits.yaml | 6 +++--- examples/tts/conf/vits_44100.yaml | 5 +++-- nemo/collections/tts/models/vits.py | 23 ++++++++++++++++------- 3 files changed, 22 insertions(+), 12 deletions(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index be6d9040a5dc..0e2894dc6831 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -65,7 +65,7 @@ model: punct: true apostrophe: true pad_with_space: false - sep_with_space: true + sep_with_space: false g2p: _target_: 
nemo_text_processing.g2p.modules.IPAG2P phoneme_dict: ${phoneme_dict_path} @@ -205,7 +205,7 @@ trainer: check_val_every_n_epoch: 1 exp_manager: - exp_dir: ../exps/vits_fp16_local + exp_dir: ??? name: ${name} create_tensorboard_logger: false create_checkpoint_callback: true @@ -214,7 +214,7 @@ exp_manager: mode: min create_wandb_logger: true wandb_logger_kwargs: - name: vits_fp16_local + name: ??? project: ${name} entity: nvidia resume: "allow" diff --git a/examples/tts/conf/vits_44100.yaml b/examples/tts/conf/vits_44100.yaml index 5646a608ed02..0915844ac945 100644 --- a/examples/tts/conf/vits_44100.yaml +++ b/examples/tts/conf/vits_44100.yaml @@ -182,8 +182,9 @@ model: eps: 1e-9 sched: - name: ExponentialLR - lr_decay: 0.999875 + name: CosineAnnealing + max_steps: 1000000 + min_lr: 1e-5 trainer: num_nodes: 1 diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 545ed7b92306..171c930256f8 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -36,12 +36,15 @@ GeneratorLoss ) from nemo.collections.tts.models.base import TextToWaveform -from nemo.collections.tts.modules.vits_modules import *# MultiPeriodDiscriminator +from nemo.collections.tts.modules.vits_modules import MultiPeriodDiscriminator from nemo.collections.tts.torch.data import DistributedBucketSampler from nemo.collections.tts.torch.tts_data_types import SpeakerID from nemo.core.classes.common import PretrainedModelInfo +from nemo.core.optim.lr_scheduler import CosineAnnealing from nemo.utils import logging, model_utils + + HAVE_WANDB = True try: import wandb @@ -146,9 +149,13 @@ def configure_optimizers(self): optim_d = instantiate(optim_config, params=self.net_d.parameters(),) if sched_config is not None: - scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=sched_config.lr_decay) - scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=sched_config.lr_decay) - + if sched_config.name == 'ExponentialLR': + scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=sched_config.lr_decay) + scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=sched_config.lr_decay) + elif sched_config.name == 'CosineAnnealing': + scheduler_g = CosineAnnealing(optimizer=optim_g, max_steps=sched_config.max_steps, min_lr=sched_config.min_lr,) + scheduler_d = CosineAnnealing(optimizer=optim_d, max_steps=sched_config.max_steps, min_lr=sched_config.min_lr,) + scheduler_g_dict = {'scheduler': scheduler_g, 'interval': 'step'} scheduler_d_dict = {'scheduler': scheduler_d, 'interval': 'step'} return [optim_g, optim_d], [scheduler_g_dict, scheduler_d_dict] @@ -221,10 +228,12 @@ def training_step(self, batch, batch_idx): optim_g.step() schedulers = self.lr_schedulers() - if schedulers is not None and self.trainer.is_last_batch: + if schedulers is not None: sch1, sch2 = schedulers - sch1.step() - sch2.step() + if self.trainer.is_last_batch and isinstance(sch1, 'torch.optim.lr_scheduler.ExponentialLR') \ + or isinstance(sch1, 'CosineAnnealing'): + sch1.step() + sch2.step() metrics = { "loss_gen": loss_gen, From a77b1b3a1e025b14a9fd74e4343ca609541a2c8f Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Mon, 14 Nov 2022 08:58:51 -0800 Subject: [PATCH 173/244] ref --- nemo/collections/tts/models/vits.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 171c930256f8..d557999325ac 100644 --- a/nemo/collections/tts/models/vits.py +++ 
b/nemo/collections/tts/models/vits.py @@ -260,14 +260,11 @@ def training_step(self, batch, batch_idx): def validation_step(self, batch, batch_idx): speakers = None - # if SpeakerID in self._train_dl.dataset.sup_data_types_set: if self.cfg.n_speakers > 1: (y, y_lengths, x, x_lengths, speakers) = batch else: (y, y_lengths, x, x_lengths) = batch - if speakers == None: - print(speakers) y_hat, attn, mask, *_ = self.net_g.infer(x, x_lengths, speakers, max_len=1000) y_hat = y_hat.squeeze() From dbe41af90b0a4dd4cf5d3963ec7d17e654f25fd9 Mon Sep 17 00:00:00 2001 From: Virginia Adams <78445382+vadam5@users.noreply.github.com> Date: Mon, 14 Nov 2022 11:10:13 -0800 Subject: [PATCH 174/244] Fix for prompt table restore error (#5393) * Fix for prompt table restore error Signed-off-by: Virginia Adams * Added more safety checks Signed-off-by: Virginia Adams * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added more condition checks Signed-off-by: Virginia Adams * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: Virginia Adams Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../megatron_base_prompt_learning_model.py | 6 ++++++ .../language_modeling/megatron_gpt_prompt_learning_model.py | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/nemo/collections/nlp/models/language_modeling/megatron_base_prompt_learning_model.py b/nemo/collections/nlp/models/language_modeling/megatron_base_prompt_learning_model.py index ffae75ed5a34..67448badb43a 100644 --- a/nemo/collections/nlp/models/language_modeling/megatron_base_prompt_learning_model.py +++ b/nemo/collections/nlp/models/language_modeling/megatron_base_prompt_learning_model.py @@ -436,6 +436,12 @@ def save_checkpoint_as_nemo_file(self): self.virtual_prompt_style = current_virtual_prompt_style self.virtual_prompt_source = current_virtual_prompt_source + # Revert prompt table back to previous state + if self.virtual_prompt_style == VirtualPromptStyle.P_TUNING and self.first_stage_of_pipeline(): + for taskname in current_new_tasks: + if taskname in self.prompt_table.prompt_table: + del self.prompt_table.prompt_table[taskname] + with open_dict(self.cfg): self.cfg.existing_tasks = current_existing_tasks self.cfg.new_tasks = current_new_tasks diff --git a/nemo/collections/nlp/models/language_modeling/megatron_gpt_prompt_learning_model.py b/nemo/collections/nlp/models/language_modeling/megatron_gpt_prompt_learning_model.py index 387ff52bb078..21ff3e6ad4d9 100644 --- a/nemo/collections/nlp/models/language_modeling/megatron_gpt_prompt_learning_model.py +++ b/nemo/collections/nlp/models/language_modeling/megatron_gpt_prompt_learning_model.py @@ -707,6 +707,12 @@ def save_checkpoint_as_nemo_file(self): self.virtual_prompt_style = current_virtual_prompt_style self.virtual_prompt_source = current_virtual_prompt_source + # Revert prompt table back to previous state + if self.virtual_prompt_style == VirtualPromptStyle.P_TUNING and self.frozen_model.model.pre_process: + for taskname in current_new_tasks: + if taskname in self.prompt_table.prompt_table: + del self.prompt_table.prompt_table[taskname] + with open_dict(self.cfg): self.cfg.existing_tasks = current_existing_tasks self.cfg.new_tasks = current_new_tasks From 1b5fac4ab0a5067df55283c15890b8ed891ad28c Mon Sep 17 00:00:00 2001 From: Sandeep Subramanian Date: Mon, 14 Nov 2022 17:29:44 -0800 Subject: [PATCH 175/244] Fix args (#5410) Signed-off-by:
MaximumEntropy Signed-off-by: MaximumEntropy --- .../language_modeling/conf/megatron_model_base_config.yaml | 1 + .../nlp/modules/common/megatron/megatron_decoders.py | 1 + .../nlp/modules/common/megatron/megatron_encoders.py | 2 ++ .../modules/common/megatron/megatron_transformer_decoder.py | 2 ++ .../modules/common/megatron/megatron_transformer_encoder.py | 2 ++ .../modules/common/megatron/token_level_encoder_decoder.py | 4 +++- 6 files changed, 11 insertions(+), 1 deletion(-) diff --git a/examples/nlp/language_modeling/conf/megatron_model_base_config.yaml b/examples/nlp/language_modeling/conf/megatron_model_base_config.yaml index f68b9ecf87b2..1602cda23731 100644 --- a/examples/nlp/language_modeling/conf/megatron_model_base_config.yaml +++ b/examples/nlp/language_modeling/conf/megatron_model_base_config.yaml @@ -31,5 +31,6 @@ onnx_safe: False # Use work-arounds for known problems with Torch ONNX exporter. fp32_residual_connection: False # Use FP32 for residual connections. activations_checkpoint_method: null # 'uniform', 'block' activations_checkpoint_num_layers: 1 +activations_checkpoint_granularity: null megatron_legacy: False # Whether to use the legacy Megatron model. This affects the way q,k,v is partitioned from the mixed q,k,v layer in ParallelAttention. This needs to be True for models converted from HF. normalize_attention_scores: True # Whether to scale the output Q * K^T by 1 / sqrt(hidden_size_per_head). This arg is provided as a configuration option mostly for compatibility with models that have been weight-converted from HF. You almost always want to se this to True. diff --git a/nemo/collections/nlp/modules/common/megatron/megatron_decoders.py b/nemo/collections/nlp/modules/common/megatron/megatron_decoders.py index 63d14cfe84d1..901d55ef4511 100644 --- a/nemo/collections/nlp/modules/common/megatron/megatron_decoders.py +++ b/nemo/collections/nlp/modules/common/megatron/megatron_decoders.py @@ -119,6 +119,7 @@ def get_decoder_model( fp32_residual_connection=fp32_residual_connection, activations_checkpoint_method=activations_checkpoint_method, activations_checkpoint_num_layers=activations_checkpoint_num_layers, + activations_checkpoint_granularity=activations_checkpoint_granularity, layernorm_epsilon=layernorm_epsilon, bias_activation_fusion=bias_activation_fusion, bias_dropout_add_fusion=bias_dropout_add_fusion, diff --git a/nemo/collections/nlp/modules/common/megatron/megatron_encoders.py b/nemo/collections/nlp/modules/common/megatron/megatron_encoders.py index 1917979fc66a..6b6a44c036e9 100644 --- a/nemo/collections/nlp/modules/common/megatron/megatron_encoders.py +++ b/nemo/collections/nlp/modules/common/megatron/megatron_encoders.py @@ -121,6 +121,7 @@ def get_encoder_model( fp32_residual_connection=fp32_residual_connection, activations_checkpoint_method=activations_checkpoint_method, activations_checkpoint_num_layers=activations_checkpoint_num_layers, + activations_checkpoint_granularity=activations_checkpoint_granularity, layernorm_epsilon=layernorm_epsilon, bias_activation_fusion=bias_activation_fusion, bias_dropout_add_fusion=bias_dropout_add_fusion, @@ -198,6 +199,7 @@ def get_encoder_model( fp32_residual_connection=fp32_residual_connection, activations_checkpoint_method=activations_checkpoint_method, activations_checkpoint_num_layers=activations_checkpoint_num_layers, + activations_checkpoint_granularity=activations_checkpoint_granularity, layernorm_epsilon=layernorm_epsilon, bias_activation_fusion=bias_activation_fusion, 
bias_dropout_add_fusion=bias_dropout_add_fusion, diff --git a/nemo/collections/nlp/modules/common/megatron/megatron_transformer_decoder.py b/nemo/collections/nlp/modules/common/megatron/megatron_transformer_decoder.py index 5104855c860d..530eeffaf466 100644 --- a/nemo/collections/nlp/modules/common/megatron/megatron_transformer_decoder.py +++ b/nemo/collections/nlp/modules/common/megatron/megatron_transformer_decoder.py @@ -65,6 +65,7 @@ def __init__( fp32_residual_connection=False, activations_checkpoint_method=None, activations_checkpoint_num_layers=1, + activations_checkpoint_granularity=None, layernorm_epsilon=1e-5, bias_activation_fusion=True, bias_dropout_add_fusion=True, @@ -119,6 +120,7 @@ def __init__( fp32_residual_connection=fp32_residual_connection, activations_checkpoint_method=activations_checkpoint_method, activations_checkpoint_num_layers=activations_checkpoint_num_layers, + activations_checkpoint_granularity=activations_checkpoint_granularity, layernorm_epsilon=layernorm_epsilon, hidden_dropout=hidden_dropout, attention_dropout=attention_dropout, diff --git a/nemo/collections/nlp/modules/common/megatron/megatron_transformer_encoder.py b/nemo/collections/nlp/modules/common/megatron/megatron_transformer_encoder.py index b48d89cd9644..4b1799680d54 100644 --- a/nemo/collections/nlp/modules/common/megatron/megatron_transformer_encoder.py +++ b/nemo/collections/nlp/modules/common/megatron/megatron_transformer_encoder.py @@ -62,6 +62,7 @@ def __init__( fp32_residual_connection=False, activations_checkpoint_method=None, activations_checkpoint_num_layers=1, + activations_checkpoint_granularity=None, layernorm_epsilon=1e-5, bias_activation_fusion=True, bias_dropout_add_fusion=True, @@ -117,6 +118,7 @@ def __init__( fp32_residual_connection=fp32_residual_connection, activations_checkpoint_method=activations_checkpoint_method, activations_checkpoint_num_layers=activations_checkpoint_num_layers, + activations_checkpoint_granularity=activations_checkpoint_granularity, layernorm_epsilon=layernorm_epsilon, hidden_dropout=hidden_dropout, attention_dropout=attention_dropout, diff --git a/nemo/collections/nlp/modules/common/megatron/token_level_encoder_decoder.py b/nemo/collections/nlp/modules/common/megatron/token_level_encoder_decoder.py index 758acaa6644a..78a71492f7e1 100644 --- a/nemo/collections/nlp/modules/common/megatron/token_level_encoder_decoder.py +++ b/nemo/collections/nlp/modules/common/megatron/token_level_encoder_decoder.py @@ -179,6 +179,7 @@ def __init__( fp32_residual_connection=encoder_cfg.get('fp32_residual_connection', False), activations_checkpoint_method=encoder_cfg.get('activations_checkpoint_method', None), activations_checkpoint_num_layers=encoder_cfg.get('activations_checkpoint_num_layers', 1), + activations_checkpoint_granularity=encoder_cfg.get('activations_checkpoint_granularity', None), layernorm_epsilon=encoder_cfg.get('layernorm_epsilon', 1e-5), bias_activation_fusion=encoder_cfg.get('bias_activation_fusion', True), bias_dropout_add_fusion=encoder_cfg.get('bias_dropout_add_fusion', True), @@ -279,11 +280,12 @@ def __init__( use_cpu_initialization=use_cpu_initialization, hidden_dropout=decoder_cfg.get('hidden_dropout', 0.1), attention_dropout=decoder_cfg.get('attention_dropout', 0.1), - ffn_dropout=encoder_cfg.get('ffn_dropout', 0.0), + ffn_dropout=decoder_cfg.get('ffn_dropout', 0.0), precision=precision, fp32_residual_connection=decoder_cfg.get('fp32_residual_connection', False), activations_checkpoint_method=decoder_cfg.get('activations_checkpoint_method', 
None), activations_checkpoint_num_layers=decoder_cfg.get('activations_checkpoint_num_layers', 1), + activations_checkpoint_granularity=decoder_cfg.get('activations_checkpoint_granularity', None), layernorm_epsilon=decoder_cfg.get('layernorm_epsilon', 1e-5), bias_activation_fusion=decoder_cfg.get('bias_activation_fusion', True), bias_dropout_add_fusion=decoder_cfg.get('bias_dropout_add_fusion', True), From acd34da9d712beaea23cf3ca8ab8ca55f8e99a3b Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Tue, 15 Nov 2022 04:22:59 -0800 Subject: [PATCH 176/244] bugfix --- nemo/collections/tts/models/vits.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index d557999325ac..08b4f5be421c 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -230,8 +230,8 @@ def training_step(self, batch, batch_idx): schedulers = self.lr_schedulers() if schedulers is not None: sch1, sch2 = schedulers - if self.trainer.is_last_batch and isinstance(sch1, 'torch.optim.lr_scheduler.ExponentialLR') \ - or isinstance(sch1, 'CosineAnnealing'): + if self.trainer.is_last_batch and isinstance(sch1, torch.optim.lr_scheduler.ExponentialLR) \ + or isinstance(sch1, CosineAnnealing): sch1.step() sch2.step() From 9d98c52c50f1d75d53f894e5f9ecd8bca1ab8233 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Tue, 15 Nov 2022 09:28:42 -0800 Subject: [PATCH 177/244] import tests --- .github/workflows/import-test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/import-test.yml b/.github/workflows/import-test.yml index 5fc34347710d..d4662bb454ba 100644 --- a/.github/workflows/import-test.yml +++ b/.github/workflows/import-test.yml @@ -12,7 +12,7 @@ jobs: # Check https://hub.docker.com/r/pytorch/pytorch/tags for latest tags container: - image: pytorch/pytorch:1.11.0-cuda11.3-cudnn8-runtime + image: pytorch/pytorch:1.12.1-cuda11.3-cudnn8-runtime steps: - uses: actions/checkout@v2 From 0718b17a8e1f89ee7e167698d1a5d5acad3f1b2a Mon Sep 17 00:00:00 2001 From: yaoyu-33 <54727607+yaoyu-33@users.noreply.github.com> Date: Tue, 15 Nov 2022 11:23:57 -0800 Subject: [PATCH 178/244] Add temporary fix for CUDA issue in Dockerfile (#5421) Signed-off-by: Yu Yao Signed-off-by: Yu Yao --- Dockerfile | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/Dockerfile b/Dockerfile index cd78ef4348e1..51a7fb676dae 100644 --- a/Dockerfile +++ b/Dockerfile @@ -100,5 +100,9 @@ COPY tests /workspace/nemo/tests COPY tutorials /workspace/nemo/tutorials # COPY README.rst LICENSE /workspace/nemo/ +# Temporary fix CUDA issue +RUN sed -i "s/, all_gpu_ids//g" /opt/conda/lib/python3.8/site-packages/pytorch_lightning/accelerators/cuda.py +RUN sed -i "s/all_gpu_ids =/\# all_gpu_ids =/g" /opt/conda/lib/python3.8/site-packages/pytorch_lightning/accelerators/cuda.py + RUN printf "#!/bin/bash\njupyter lab --no-browser --allow-root --ip=0.0.0.0" >> start-jupyter.sh && \ chmod +x start-jupyter.sh From 68cd1a75a04cd34fdbb89b1c663ef10ff351eca6 Mon Sep 17 00:00:00 2001 From: David Date: Tue, 15 Nov 2022 14:54:50 -0700 Subject: [PATCH 179/244] Megatron Export Update (#5343) * export update for Megatron + change ORT optimization Signed-off-by: David Mosallanezhad * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * updated export_utils to use autocast instead of manually casting >:/ Signed-off-by: David Mosallanezhad * removed dtype from LayerNorm Signed-off-by: 
David Mosallanezhad * added comment Signed-off-by: David Mosallanezhad * reverting changes on FloatCast Signed-off-by: David Mosallanezhad * Cherry-picked changes from megatron-norm Signed-off-by: Boris Fomitchev * updated asr_model import to cast_utils Signed-off-by: David Mosallanezhad * updated del onnx_model place Signed-off-by: David Mosallanezhad * changed ort optimization to basic -> temp fix Signed-off-by: David Mosallanezhad Signed-off-by: David Mosallanezhad Signed-off-by: Boris Fomitchev Co-authored-by: David Mosallanezhad Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Boris Fomitchev --- nemo/collections/asr/models/asr_model.py | 2 +- .../machine_translation/megatron_nmt_model.py | 12 +- .../common/megatron/megatron_export.py | 50 ++++--- nemo/utils/__init__.py | 7 + nemo/utils/cast_utils.py | 75 +++++++++++ nemo/utils/export_utils.py | 122 +++++++++--------- scripts/export.py | 25 ++-- 7 files changed, 200 insertions(+), 93 deletions(-) create mode 100644 nemo/utils/cast_utils.py diff --git a/nemo/collections/asr/models/asr_model.py b/nemo/collections/asr/models/asr_model.py index 23f5e60d9489..7c8a7ab8aa78 100644 --- a/nemo/collections/asr/models/asr_model.py +++ b/nemo/collections/asr/models/asr_model.py @@ -21,7 +21,7 @@ from nemo.core.classes.exportable import Exportable from nemo.core.classes.mixins import AccessMixin from nemo.utils import logging, model_utils -from nemo.utils.export_utils import cast_all +from nemo.utils.cast_utils import cast_all __all__ = ['ASRModel'] diff --git a/nemo/collections/nlp/models/machine_translation/megatron_nmt_model.py b/nemo/collections/nlp/models/machine_translation/megatron_nmt_model.py index 0b9bb966e3e8..a44b560fbb2d 100644 --- a/nemo/collections/nlp/models/machine_translation/megatron_nmt_model.py +++ b/nemo/collections/nlp/models/machine_translation/megatron_nmt_model.py @@ -42,6 +42,7 @@ from nemo.collections.nlp.modules.common.megatron.megatron_export import DecEmb, EncEmb, TokensHeadEmb from nemo.collections.nlp.parts.nlp_overrides import GlobalBatchDataFetcher from nemo.collections.nlp.parts.utils_funcs import get_last_rank +from nemo.core.classes import Exportable from nemo.utils import AppState, logging, timers try: @@ -56,7 +57,7 @@ __all__ = ["MegatronNMTModel"] -class MegatronNMTModel(MegatronLMEncoderDecoderModel): +class MegatronNMTModel(MegatronLMEncoderDecoderModel, Exportable): """ Megatron NMT training """ @@ -750,5 +751,12 @@ def decoder(self): return DecEmb(self.enc_dec_model.decoder_embedding, self.enc_dec_model.enc_dec_model.decoder, self.device) @property - def classifier(self): + def log_softmax(self): return TokensHeadEmb(self.enc_dec_model.decoder_embedding, self.enc_dec_model.tokens_head, self.device) + + @property + def input_module(self): + return self.encoder + + def list_export_subnets(self): + return ['encoder', 'log_softmax', 'decoder'] diff --git a/nemo/collections/nlp/modules/common/megatron/megatron_export.py b/nemo/collections/nlp/modules/common/megatron/megatron_export.py index 6fd9a239380c..8b9a5fff9e88 100644 --- a/nemo/collections/nlp/modules/common/megatron/megatron_export.py +++ b/nemo/collections/nlp/modules/common/megatron/megatron_export.py @@ -49,21 +49,23 @@ def forward(self, dec_output): if isinstance(dec_output, list): dec_output = dec_output[0] - dec_output = torch.permute(dec_output, (1, 0, 2)) - if self.tokens_head_bias is not None: return F.linear(dec_output, self.decoder_embedding.word_embeddings.weight, 
self.tokens_head_bias) return F.linear(dec_output, self.decoder_embedding.word_embeddings.weight) - def input_example(self, max_batch=1, max_dim=1024, seq_len=6): + def input_example(self, max_batch=1, max_dim=768, seq_len=6): return [ - torch.randint(low=-3, high=3, size=(seq_len, max_batch, max_dim), device=self.device, dtype=torch.float32) + torch.randint(low=-3, high=3, size=(max_batch, seq_len, max_dim), device=self.device, dtype=torch.float32) ] + def freeze(self): + for param in self.parameters(): + param.requires_grad = False + @property def input_types(self) -> Optional[Dict[str, NeuralType]]: return { - "hidden_states": NeuralType(('T', 'B', 'D'), ChannelType()), + "hidden_states": NeuralType(('B', 'T', 'D'), ChannelType()), } @property @@ -107,18 +109,28 @@ def forward(self, input_ids, decoder_mask, encoder_mask, encoder_embeddings, dec # dec_input, dec_attn_mask, enc_output, enc_attn_mask | dec_input, dec_attn_mask, enc_output, enc_attn_mask _ = dec_mems - return self.decoder(dec_input, decoder_mask, encoder_embeddings, encoder_mask).float() + return ( + self.decoder(dec_input, decoder_mask, encoder_embeddings.permute(1, 0, 2), encoder_mask) + .float() + .permute(1, 0, 2) + ) - def input_example(self, max_batch=1, max_dim=1024, seq_len=6): + def freeze(self): + for param in self.parameters(): + param.requires_grad = False + + def input_example(self, max_batch=1, max_dim=768, seq_len=6): enc_output = torch.randint( - low=-3, high=3, size=(seq_len, max_batch, max_dim), device=self.device, dtype=torch.float32 + low=-3, high=3, size=(max_batch, seq_len, max_dim), device=self.device, dtype=torch.float32 ) enc_attn_mask = torch.tensor([[1 for _ in range(seq_len)]]).to(self.device) dec_len = random.randint(10, 128) dec_input = torch.randint(low=0, high=1000, size=(max_batch, dec_len), device=self.device) dec_attn_mask = torch.tensor([[1 for _ in range(dec_len)]]).to(self.device) - decoder_mems = torch.zeros([8, 6, 1024], dtype=torch.float32).to(self.device) + + # constant decoder_mems as placeholder for now + decoder_mems = torch.zeros([8, 6, max_dim], dtype=torch.float32).to(self.device) # input_ids, decoder_mask, encoder_mask, encoder_embeddings return (dec_input, dec_attn_mask, enc_attn_mask, enc_output, decoder_mems) @@ -128,14 +140,14 @@ def input_types(self) -> Optional[Dict[str, NeuralType]]: return { "input_ids": NeuralType(('B', 'T', 'D'), ChannelType()), "decoder_mask": NeuralType(('B', 'T'), MaskType()), - "encoder_mask": NeuralType(('T', 'B', 'D'), ChannelType()), + "encoder_mask": NeuralType(('B', 'T', 'D'), ChannelType()), "encoder_embeddings": NeuralType(('B', 'T'), MaskType()), - "decoder_mems": NeuralType(('T', 'B', 'D'), ChannelType()), + "decoder_mems": NeuralType(('B', 'T', 'D'), ChannelType()), } @property def output_types(self) -> Optional[Dict[str, NeuralType]]: - return {"last_hidden_states": NeuralType(('T', 'B', 'D'), ChannelType())} + return {"last_hidden_states": NeuralType(('B', 'T', 'D'), ChannelType())} @property def input_names(self) -> List[str]: @@ -172,15 +184,19 @@ def forward(self, input_ids, encoder_mask): enc_input = self.encoder_embedding(input_ids, position_ids, token_type_ids=None) # pass input through the encoder - return self.encoder(enc_input=enc_input, enc_attn_mask=encoder_mask,).type(torch.float32) + return self.encoder(enc_input=enc_input, enc_attn_mask=encoder_mask,).permute(1, 0, 2) - def input_example(self): + def input_example(self, max_batch=1, max_dim=30000, seq_len=6): seq_len = random.randint(0, 128) return ( - torch.randint(0, 
30000, (1, seq_len)).to(self.device), - torch.ones((1, seq_len), dtype=int).to(self.device), + torch.randint(0, max_dim, (max_batch, seq_len)).to(self.device), + torch.ones((max_batch, seq_len), dtype=int).to(self.device), ) + def freeze(self): + for param in self.parameters(): + param.requires_grad = False + @property def input_types(self) -> Optional[Dict[str, NeuralType]]: return { @@ -190,7 +206,7 @@ def input_types(self) -> Optional[Dict[str, NeuralType]]: @property def output_types(self) -> Optional[Dict[str, NeuralType]]: - return {"last_hidden_states": NeuralType(('T', 'B', 'D'), ChannelType())} + return {"last_hidden_states": NeuralType(('B', 'T', 'D'), ChannelType())} @property def input_names(self) -> List[str]: diff --git a/nemo/utils/__init__.py b/nemo/utils/__init__.py index fd3cbd2699d1..0b2748e580af 100644 --- a/nemo/utils/__init__.py +++ b/nemo/utils/__init__.py @@ -14,6 +14,13 @@ from nemo.utils.app_state import AppState +from nemo.utils.cast_utils import ( + CastToFloat, + avoid_bfloat16_autocast_context, + avoid_float16_autocast_context, + cast_all, + cast_tensor, +) from nemo.utils.nemo_logging import Logger as _Logger from nemo.utils.nemo_logging import LogMode as logging_mode diff --git a/nemo/utils/cast_utils.py b/nemo/utils/cast_utils.py new file mode 100644 index 000000000000..9eb064936ea5 --- /dev/null +++ b/nemo/utils/cast_utils.py @@ -0,0 +1,75 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from contextlib import nullcontext + +import torch + + +def avoid_bfloat16_autocast_context(): + """ + If the current autocast context is bfloat16, + cast it to float32 + """ + + if torch.is_autocast_enabled() and torch.get_autocast_gpu_dtype() == torch.bfloat16: + return torch.cuda.amp.autocast(dtype=torch.float32) + else: + return nullcontext() + + +def avoid_float16_autocast_context(): + """ + If the current autocast context is float16, cast it to bfloat16 + if available (unless we're in jit) or float32 + """ + + if torch.is_autocast_enabled() and torch.get_autocast_gpu_dtype() == torch.float16: + if torch.jit.is_scripting() or torch.jit.is_tracing(): + return torch.cuda.amp.autocast(dtype=torch.float32) + + if torch.cuda.is_bf16_supported(): + return torch.cuda.amp.autocast(dtype=torch.bfloat16) + else: + return torch.cuda.amp.autocast(dtype=torch.float32) + else: + return nullcontext() + + +def cast_tensor(x, from_dtype=torch.float16, to_dtype=torch.float32): + return x.to(dtype=to_dtype) if x.dtype == from_dtype else x + + +def cast_all(x, from_dtype=torch.float16, to_dtype=torch.float32): + if isinstance(x, torch.Tensor): + return cast_tensor(x, from_dtype=from_dtype, to_dtype=to_dtype) + else: + if isinstance(x, dict): + new_dict = {} + for k in x.keys(): + new_dict[k] = cast_all(x[k], from_dtype=from_dtype, to_dtype=to_dtype) + return new_dict + elif isinstance(x, tuple): + return tuple(cast_all(y, from_dtype=from_dtype, to_dtype=to_dtype) for y in x) + + +class CastToFloat(torch.nn.Module): + def __init__(self, mod): + super(CastToFloat, self).__init__() + self.mod = mod + + def forward(self, x): + with avoid_float16_autocast_context(): + ret = self.mod.forward(x.to(torch.float32)).to(x.dtype) + return ret diff --git a/nemo/utils/export_utils.py b/nemo/utils/export_utils.py index 3d042dd070e2..a5c4e5b3d24f 100644 --- a/nemo/utils/export_utils.py +++ b/nemo/utils/export_utils.py @@ -13,6 +13,7 @@ # limitations under the License. 
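A short usage sketch for the autocast helpers added above; the layer choice, shapes, and device handling are illustrative only:

```python
# Illustrative use of CastToFloat: keep a numerically sensitive layer in fp32
# even when the surrounding region runs under fp16 autocast.
import torch

dev = "cuda" if torch.cuda.is_available() else "cpu"
norm = torch.nn.LayerNorm(16).to(dev)
safe_norm = CastToFloat(norm)  # forward() escapes an fp16 autocast region

with torch.cuda.amp.autocast(enabled=torch.cuda.is_available(), dtype=torch.float16):
    x = torch.randn(2, 8, 16, device=dev)
    y = safe_norm(x)  # LayerNorm computed in fp32, result cast back to x.dtype
```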
import os +from contextlib import nullcontext from enum import Enum from typing import Callable, Dict, Optional, Type @@ -21,7 +22,7 @@ import torch.nn as nn import torch.nn.functional as F -from nemo.utils import logging +from nemo.utils import CastToFloat, logging try: import onnxruntime @@ -45,36 +46,6 @@ class ExportFormat(Enum): } -def cast_tensor(x, from_dtype=torch.float16, to_dtype=torch.float32): - return x.to(dtype=to_dtype) if x.dtype == from_dtype else x - - -def cast_all(x, from_dtype=torch.float16, to_dtype=torch.float32): - if isinstance(x, torch.Tensor): - return cast_tensor(x, from_dtype=from_dtype, to_dtype=to_dtype) - else: - if isinstance(x, dict): - new_dict = {} - for k in x.keys(): - new_dict[k] = cast_all(x[k], from_dtype=from_dtype, to_dtype=to_dtype) - return new_dict - elif isinstance(x, tuple): - return tuple(cast_all(y, from_dtype=from_dtype, to_dtype=to_dtype) for y in x) - - -class CastToFloat(nn.Module): - def __init__(self, mod): - super(CastToFloat, self).__init__() - self.mod = mod - - def forward(self, x): - if torch.is_autocast_enabled(): - ret = self.mod.forward(x.to(torch.float32)).to(x.dtype) - else: - ret = self.mod.forward(x) - return ret - - class LinearWithBiasSkip(nn.Module): def __init__(self, weight, bias, skip_bias_add): super(LinearWithBiasSkip, self).__init__() @@ -88,27 +59,46 @@ def forward(self, x): return F.linear(x, self.weight, self.bias), None -# ScaledMaskedSoftmax replacement -def mask_func(attention_scores, attention_mask): - attention_scores.masked_fill_(attention_mask, -10000.0) - return attention_scores - - -def exportable_ScaledMaskedSoftmax(input, mask, scale): - if scale is not None: - input = input * scale - - mask_output = mask_func(input, mask) if mask is not None else input - probs = torch.nn.Softmax(dim=-1)(mask_output) - - probs = probs.half() - return probs +class ExportableMatchedScaleMaskSoftmax(nn.Module): + def __init__(self, mod): + super(ExportableMatchedScaleMaskSoftmax, self).__init__() + self.init_module(mod.input_in_fp16, mod.input_in_bf16, mod.mask_func, mod.softmax_in_fp32, mod.scale) + + def init_module( + self, input_in_fp16, input_in_bf16, mask_func, softmax_in_fp32, scale, + ): + self.input_in_fp16 = input_in_fp16 + self.input_in_bf16 = input_in_bf16 + self.softmax_in_fp32 = softmax_in_fp32 + self.mask_func = mask_func + self.scale = scale + + self.input_in_float16 = self.input_in_fp16 or self.input_in_bf16 + + def forward(self, input, mask): + if self.input_in_float16 and self.softmax_in_fp32: + input = input.float() + + if self.scale is not None: + input = input * self.scale + mask_output = self.mask_func(input, mask) if mask is not None else input + probs = torch.nn.Softmax(dim=-1)(mask_output) + all_k_masked = mask.all(axis=-1) + zero_attention_mask = (1.0 - all_k_masked.float())[:, :, :, None] + probs = probs * zero_attention_mask + + if self.input_in_float16 and self.softmax_in_fp32: + if self.input_in_fp16: + probs = probs.half() + else: + probs = probs.bfloat16() + return probs def get_export_format(filename: str): _, ext = os.path.splitext(filename) try: - return _EXT_DICT[ext] + return _EXT_DICT[ext.lower()] except KeyError: raise ValueError(f"Export file {filename} extension does not correspond to any export format!") @@ -186,12 +176,12 @@ def verify_runtime(model, output, input_examples, input_names, check_tolerance=0 logging.warning(f"ONNX generated at {output}, not verified - please install onnxruntime_gpu package.\n") onnx.checker.check_model(onnx_model, full_check=True) return - 
    onnx_session_opt = onnxruntime.SessionOptions()
-    onnx_session_opt.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
+    onnx_session_opt.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_BASIC
     sess = onnxruntime.InferenceSession(
         onnx_model.SerializeToString(), sess_options=onnx_session_opt, providers=['CUDAExecutionProvider']
     )
+    del onnx_model
     all_good = True
     for input_example in input_examples:
         input_list, input_dict = parse_input_example(input_example)
@@ -244,7 +234,7 @@ def run_ort_and_compare(sess, ort_input, output_example, check_tolerance=0.01):
     from apex.transformer.tensor_parallel.layers import RowParallelLinear, ColumnParallelLinear
     from apex.transformer.functional.fused_softmax import FusedScaleMaskSoftmax

-    def replace_FusedLayerNorm(n: nn.Module) -> Optional[nn.BatchNorm2d]:
+    def replace_FusedLayerNorm(n: nn.Module) -> Optional[nn.LayerNorm]:
         """
            Replaces Apex's FusedLayerNorm with nn.LayerNorm. This is required for ONNX export.
            Args:
@@ -252,19 +242,16 @@ def replace_FusedLayerNorm(n: nn.Module) -> Optional[nn.BatchNorm2d]:
            Returns:
               Equivalent LayerNorm module
         """
-        if (
-            not isinstance(n, FusedLayerNorm)
-            and not isinstance(n, FastLayerNorm)
-            and not isinstance(n, MixedFusedLayerNorm)
-        ):
-            return None
-        dev = next(n.parameters()).device
+        p = next(n.parameters())
         if isinstance(n, FusedLayerNorm) or isinstance(n, MixedFusedLayerNorm):
-            mod = nn.LayerNorm(n.normalized_shape, eps=n.eps, elementwise_affine=n.elementwise_affine,).to(dev)
+            shape, eps, affine = n.normalized_shape, n.eps, n.elementwise_affine
         elif isinstance(n, FastLayerNorm):
-            mod = nn.LayerNorm(n.weight.shape, eps=n.epsilon, elementwise_affine=True, dtype=torch.float16,).to(dev)
+            shape, eps, affine = n.weight.shape, n.epsilon, True
+        else:
+            return None
+
+        mod = nn.LayerNorm(shape, eps=eps, elementwise_affine=affine, device=p.device, dtype=p.dtype)
         n_state = n.state_dict()
         mod.load_state_dict(n_state)
         return mod
@@ -281,7 +268,7 @@ def replace_RowParallelLinear(n: nn.Module) -> Optional[nn.Linear]:
             raise ValueError("This function can only change the RowParallelLinear module.")
         dev = next(n.parameters()).device

-        mod = LinearWithBiasSkip(n.weight, n.bias, n.skip_bias_add).to(dev)
+        mod = LinearWithBiasSkip(n.weight, n.bias, n.skip_bias_add).to(device=dev)
         n_state = n.state_dict()
         mod.load_state_dict(n_state)
@@ -357,6 +344,20 @@ def expansion_fn(mod: nn.Module) -> Optional[nn.Module]:
     return expansion_fn


+def replace_MatchedScaleMaskSoftmax(n: nn.Module) -> Optional[nn.Module]:
+    """
+    Replaces MatchedScaleMaskSoftmax with exportable softmax layer
+    Args:
+        n: module to replace
+    Returns:
+        exportable module
+    """
+    # The wrapper's __init__ takes the module itself and pulls input_in_fp16,
+    # input_in_bf16, mask_func, softmax_in_fp32 and scale off of it.
+    mod = ExportableMatchedScaleMaskSoftmax(n)
+
+    return mod
+
+
 def wrap_module(BaseT: Type[nn.Module], DestT: Type[nn.Module]) -> Callable[[nn.Module], Optional[nn.Module]]:
     """
     Generic function generator to replace BaseT module with DestT wrapper.
@@ -421,6 +422,7 @@ def replace_modules(
     "BatchNorm1d": wrap_module(nn.BatchNorm1d, CastToFloat),
     "BatchNorm2d": wrap_module(nn.BatchNorm2d, CastToFloat),
     "LayerNorm": wrap_module(nn.LayerNorm, CastToFloat),
+    "MatchedScaleMaskSoftmax": wrap_module(nn.Softmax, ExportableMatchedScaleMaskSoftmax),
 }
diff --git a/scripts/export.py b/scripts/export.py
index ca9c4fed64aa..b3d6317e936c 100644
--- a/scripts/export.py
+++ b/scripts/export.py
@@ -133,22 +133,21 @@ def nemo_export(argv):
             logging.info("Caching support is enabled.")

     autocast = nullcontext
-    model.to(device=args.device).freeze()
-    model.eval()
-    with torch.inference_mode():
-        input_example = model.input_module.input_example(**in_args)
-    if check_trace:
-        check_trace = [input_example]
-        if max_dim:
-            in_args["max_dim"] = (max_dim + 1) // 2
-            in_args["max_batch"] = (max_batch + 1) // 2
-            input_example2 = model.input_module.input_example(**in_args)
-            check_trace.append(input_example2)
-
     if args.autocast:
         autocast = torch.cuda.amp.autocast
     try:
-        with autocast(), torch.inference_mode():
+        with autocast(), torch.no_grad(), torch.inference_mode():
+            model.to(device=args.device).freeze()
+            model.eval()
+            input_example = None
+            if check_trace and len(in_args) > 0:
+                input_example = model.input_module.input_example(**in_args)
+                check_trace = [input_example]
+                # Iterate over (key, value) pairs; bare iteration over the dict
+                # would yield only keys.
+                for key, arg in in_args.items():
+                    in_args[key] = (arg + 1) // 2
+                input_example2 = model.input_module.input_example(**in_args)
+                check_trace.append(input_example2)
+
             _, descriptions = model.export(
                 out,
                 input_example=input_example,

From cbf6862ae14ae14df270e7ce6a1af343907b7a19 Mon Sep 17 00:00:00 2001
From: Evelina <10428420+ekmb@users.noreply.github.com>
Date: Tue, 15 Nov 2022 14:32:58 -0800
Subject: [PATCH 180/244] disable pc test (#5426)

Signed-off-by: ekmb

Signed-off-by: ekmb
---
 Jenkinsfile                                         | 36 +++++++++----------
 .../wfst/wfst_text_normalization.rst                |  2 +-
 2 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/Jenkinsfile b/Jenkinsfile
index d0bba5e46472..20d052127ae1 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -1141,24 +1141,24 @@ pipeline {
             rm -rf "${data_dir}"'
           }
         }
-        stage('Test Restore Punctuation & Capitalization with RoBERTa') {
-          steps {
-            sh 'data_dir="$(mktemp -d -p "$(pwd)")" && \
-            cp /home/TestData/nlp/token_classification_punctuation/*.txt "${data_dir}"/ && \
-            python examples/nlp/token_classification/punctuation_capitalization_train_evaluate.py \
-              +do_training=false \
-              +do_testing=true \
-              pretrained_model=/home/TestData/nlp/pretrained_models/Punctuation_and_Capitalization_roberta.nemo \
-              +model.test_ds.use_cache=false \
-              ~model.train_ds \
-              ~model.validation_ds \
-              model.test_ds.ds_item="${data_dir}" \
-              trainer.devices=[1] \
-              trainer.accelerator="gpu" \
-              exp_manager=null && \
-            rm -rf "${data_dir}"'
-          }
-        }
+//        stage('Test Restore Punctuation & Capitalization with RoBERTa') {
+//          steps {
+//            sh 'data_dir="$(mktemp -d -p "$(pwd)")" && \
+//            cp /home/TestData/nlp/token_classification_punctuation/*.txt "${data_dir}"/ && \
+//            python examples/nlp/token_classification/punctuation_capitalization_train_evaluate.py \
+//              +do_training=false \
+//              +do_testing=true \
+//              pretrained_model=/home/TestData/nlp/pretrained_models/Punctuation_and_Capitalization_roberta.nemo \
+//              +model.test_ds.use_cache=false \
+//              ~model.train_ds \
+//              ~model.validation_ds \
+//              model.test_ds.ds_item="${data_dir}" \
+//              trainer.devices=[1] \
+//              trainer.accelerator="gpu" \
+//              exp_manager=null && \
+//            rm -rf "${data_dir}"'
+//          }
+//        }
       }
     }
     stage('L2: Dialogue Classification') {
diff --git
a/docs/source/nlp/text_normalization/wfst/wfst_text_normalization.rst b/docs/source/nlp/text_normalization/wfst/wfst_text_normalization.rst index 49817b72c89b..c20a088f4176 100644 --- a/docs/source/nlp/text_normalization/wfst/wfst_text_normalization.rst +++ b/docs/source/nlp/text_normalization/wfst/wfst_text_normalization.rst @@ -157,7 +157,7 @@ Language Support Matrix +------------------+----------+----------+----------+--------------------+----------------------+ | Vietnamese | vi | | x | | | +------------------+----------+----------+----------+--------------------+----------------------+ -| Portugese | pt | | x | | | +| Portuguese | pt | | x | | | +------------------+----------+----------+----------+--------------------+----------------------+ | Chinese | zh | x | | | | +------------------+----------+----------+----------+--------------------+----------------------+ From b211849b1e6f302b53187f608bae5153e42e622d Mon Sep 17 00:00:00 2001 From: Sandeep Subramanian Date: Tue, 15 Nov 2022 14:36:16 -0800 Subject: [PATCH 181/244] Fix GPT generation when using sentencepiece tokenizer (#5413) * Fix Signed-off-by: MaximumEntropy * Fix Signed-off-by: MaximumEntropy Signed-off-by: MaximumEntropy Co-authored-by: Yi Dong Co-authored-by: Oleksii Kuchaiev --- .../language_modeling/megatron_gpt_eval.py | 16 ++++++-- .../modules/common/text_generation_utils.py | 39 +++++++++++++++---- 2 files changed, 44 insertions(+), 11 deletions(-) diff --git a/examples/nlp/language_modeling/megatron_gpt_eval.py b/examples/nlp/language_modeling/megatron_gpt_eval.py index 780def36f5d2..fb30622f7219 100644 --- a/examples/nlp/language_modeling/megatron_gpt_eval.py +++ b/examples/nlp/language_modeling/megatron_gpt_eval.py @@ -24,7 +24,7 @@ from nemo.collections.nlp.modules.common.text_generation_server import MegatronServer from nemo.collections.nlp.modules.common.text_generation_utils import generate from nemo.collections.nlp.modules.common.transformer.text_generation import LengthParam, SamplingParam -from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy +from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy, NLPSaveRestoreConnector from nemo.core.config import hydra_runner from nemo.utils.app_state import AppState from nemo.utils.model_utils import inject_model_parallel_rank @@ -160,8 +160,15 @@ def main(cfg) -> None: ), "devices * num_nodes should equal tensor_model_parallel_size * pipeline_model_parallel_size" if cfg.gpt_model_file: + save_restore_connector = NLPSaveRestoreConnector() + if os.path.isdir(cfg.gpt_model_file): + save_restore_connector.model_extracted_dir = cfg.gpt_model_file + pretrained_cfg = MegatronGPTModel.restore_from( - restore_path=cfg.gpt_model_file, trainer=trainer, return_config=True + restore_path=cfg.gpt_model_file, + trainer=trainer, + return_config=True, + save_restore_connector=save_restore_connector, ) OmegaConf.set_struct(pretrained_cfg, True) with open_dict(pretrained_cfg): @@ -169,7 +176,10 @@ def main(cfg) -> None: pretrained_cfg.activations_checkpoint_granularity = None pretrained_cfg.activations_checkpoint_method = None model = MegatronGPTModel.restore_from( - restore_path=cfg.gpt_model_file, trainer=trainer, override_config_path=pretrained_cfg + restore_path=cfg.gpt_model_file, + trainer=trainer, + override_config_path=pretrained_cfg, + save_restore_connector=save_restore_connector, ) elif cfg.checkpoint_dir: app_state = AppState() diff --git a/nemo/collections/nlp/modules/common/text_generation_utils.py 
b/nemo/collections/nlp/modules/common/text_generation_utils.py index dbacc90eeda4..aeeedd24148f 100644 --- a/nemo/collections/nlp/modules/common/text_generation_utils.py +++ b/nemo/collections/nlp/modules/common/text_generation_utils.py @@ -14,6 +14,8 @@ """Utilities for generating text.""" +from collections.abc import Iterable + import torch import torch.nn.functional as F @@ -454,6 +456,21 @@ def generate( repetition_penalty=repetition_penalty, min_tokens_to_generate=min_tokens_to_generate, ) + special_tokens = set() + if hasattr(tokenizer, 'pad_token') and tokenizer.pad_token is not None: + special_tokens.add(tokenizer.pad_token) + if hasattr(tokenizer, 'eos_token') and tokenizer.eos_token is not None: + special_tokens.add(tokenizer.eos_token) + if hasattr(tokenizer, 'bos_token') and tokenizer.bos_token is not None: + special_tokens.add(tokenizer.bos_token) + if hasattr(tokenizer, 'cls_token') and tokenizer.cls_token is not None: + special_tokens.add(tokenizer.cls_token) + if hasattr(tokenizer, 'unk_token') and tokenizer.unk_token is not None: + special_tokens.add(tokenizer.unk_token) + if hasattr(tokenizer, 'sep_token') and tokenizer.sep_token is not None: + special_tokens.add(tokenizer.sep_token) + if hasattr(tokenizer, 'mask_token') and tokenizer.mask_token is not None: + special_tokens.add(tokenizer.mask_token) if output is not None: decode_tokens, output_logits, full_logits = output resp_sentences = [] @@ -466,25 +483,31 @@ def generate( if not isinstance(tokenizer, TabularTokenizer): words = [] for token in decode_token: - # Skip any soft prompt pseudo tokens - if token not in tokenizer.tokenizer.decoder: - continue - word = tokenizer.tokenizer.decoder[token] - word = bytearray([tokenizer.tokenizer.byte_decoder[c] for c in word]).decode( - 'utf-8', errors='replace' - ) + if not isinstance(token, Iterable): + token = [token] + word = tokenizer.ids_to_tokens(token) + if isinstance(word, Iterable): + word = word[0] + if hasattr(tokenizer.tokenizer, 'byte_decoder'): + word = bytearray([tokenizer.tokenizer.byte_decoder[c] for c in word]).decode( + 'utf-8', errors='replace' + ) words.append(word) resp_sentences_seg.append(words) else: words = tokenizer.text_to_tokens(sentence) resp_sentences_seg.append(words) + # offsets calculation all_offsets = [] for item in resp_sentences_seg: offsets = [0] for index, token in enumerate(item): if index != len(item) - 1: - offsets.append(len(token) + offsets[-1]) + if token in special_tokens: + offsets.append(offsets[-1]) + else: + offsets.append(len(token) + offsets[-1]) all_offsets.append(offsets) output = {} From 01cd8b6b46dd75a0881258647aa66b65a9945fd5 Mon Sep 17 00:00:00 2001 From: Eric Harper Date: Tue, 15 Nov 2022 17:14:35 -0700 Subject: [PATCH 182/244] Disable sync_batch_comm in validation_step for GPT (#5397) * disable sync_batch_comm in validation_step Signed-off-by: ericharper * Read sync_batch_comm from config or default to False Signed-off-by: Markel Sanz Ausin * Update megatron_gpt_config to default sync_batch_comm to False to avoid CUDA error Signed-off-by: Markel Sanz Ausin * Empty Signed-off-by: MaximumEntropy * Comment out test Signed-off-by: MaximumEntropy Signed-off-by: ericharper Signed-off-by: Markel Sanz Ausin Signed-off-by: MaximumEntropy Signed-off-by: Oleksii Kuchaiev Co-authored-by: Oleksii Kuchaiev Co-authored-by: Markel Sanz Ausin Co-authored-by: Sandeep Subramanian Co-authored-by: Oleksii Kuchaiev --- .../language_modeling/conf/megatron_gpt_config.yaml | 4 ++-- .../nlp/models/language_modeling/megatron_gpt_model.py | 10 
++++++---- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/examples/nlp/language_modeling/conf/megatron_gpt_config.yaml b/examples/nlp/language_modeling/conf/megatron_gpt_config.yaml index 73297588e9cf..79f1f1fbc406 100755 --- a/examples/nlp/language_modeling/conf/megatron_gpt_config.yaml +++ b/examples/nlp/language_modeling/conf/megatron_gpt_config.yaml @@ -105,7 +105,7 @@ model: onnx_safe: False # Use work-arounds for known problems with Torch ONNX exporter. apex_transformer_log_level: 30 # Python logging level displays logs with severity greater than or equal to this gradient_as_bucket_view: True # PyTorch DDP argument. Allocate gradients in a contiguous bucket to save memory (less fragmentation and buffer memory) - sync_batch_comm: True # Enable stream synchronization after each p2p communication between pipeline stages + sync_batch_comm: False # Enable stream synchronization after each p2p communication between pipeline stages ## Activation Checkpointing # NeMo Megatron supports 'selective' activation checkpointing where only the memory intensive part of attention is checkpointed. @@ -196,4 +196,4 @@ model: name: CosineAnnealing warmup_steps: 500 constant_steps: 50000 - min_lr: 2e-5 \ No newline at end of file + min_lr: 2e-5 diff --git a/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py b/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py index b176592c4b63..921c25f43fc3 100644 --- a/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py +++ b/nemo/collections/nlp/models/language_modeling/megatron_gpt_model.py @@ -310,7 +310,7 @@ def training_step(self, batch, batch_idx): grad_scaler=self.trainer.precision_plugin.scaler if self.cfg.precision == 16 else None, custom_sync_context_handler=custom_sync_context_handler, sequence_parallel_enabled=self.cfg.get('sequence_parallel', False), - sync_batch_comm=self.cfg.get('sync_batch_comm', True), + sync_batch_comm=self.cfg.get('sync_batch_comm', False), num_micro_batches_with_partial_activation_checkpoints=self.cfg.get( 'num_micro_batches_with_partial_activation_checkpoints', None ), @@ -541,7 +541,7 @@ def validation_step(self, batch, batch_idx): tensor_shape=tensor_shape, dtype=self.autocast_dtype, sequence_parallel_enabled=self.cfg.get('sequence_parallel', False), - sync_batch_comm=self.cfg.get('sync_batch_comm', True), + sync_batch_comm=self.cfg.get('sync_batch_comm', False), ) # only the last stage of the pipeline returns losses @@ -792,7 +792,8 @@ def setup(self, stage=None): else: self.model.sync_initial_word_embeddings() - self.setup_transformer_engine_tp_groups() + if self.cfg.get('transformer_engine', False): + self.setup_transformer_engine_tp_groups() def setup_training_data(self, cfg): if hasattr(self, '_train_ds'): @@ -841,7 +842,8 @@ def dummy(): self.trainer.strategy.launcher.launch(dummy, trainer=self.trainer) self.trainer.strategy.setup_environment() - self.setup_transformer_engine_tp_groups() + if self.cfg.get('transformer_engine', False): + self.setup_transformer_engine_tp_groups() # set the default sampling params if it is None. # default do greedy sampling From 792fc8ad937ec1936f43a11643e1f1a08db785fd Mon Sep 17 00:00:00 2001 From: yaoyu-33 <54727607+yaoyu-33@users.noreply.github.com> Date: Tue, 15 Nov 2022 17:18:07 -0800 Subject: [PATCH 183/244] Revert "Add temporary fix for CUDA issue in Dockerfile (#5421)" (#5431) This reverts commit 0718b17a8e1f89ee7e167698d1a5d5acad3f1b2a. 
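A side note on the `sync_batch_comm` changes above (and on the T5 patch further down, which threads the same flag through the encoder-decoder model): the flag is read with a defaulted `cfg.get('sync_batch_comm', False)` rather than attribute access, so configs and checkpoints that predate the key still load cleanly. A minimal sketch of why that matters — illustrative only, not part of any patch in this series; the config contents are made up:

```python
# Illustrative sketch only (not part of the patch series).
from omegaconf import OmegaConf

old_cfg = OmegaConf.create({"precision": 16})  # config written before the key existed

# Attribute access can fail for a missing key (e.g. on a struct config),
# while a defaulted .get() degrades gracefully to the new default.
assert old_cfg.get("sync_batch_comm", False) is False
```

This is why the diffs above consistently use the `self.cfg.get(...)` form instead of `self.cfg.sync_batch_comm`.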
--- Dockerfile | 4 ---- 1 file changed, 4 deletions(-) diff --git a/Dockerfile b/Dockerfile index 51a7fb676dae..cd78ef4348e1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -100,9 +100,5 @@ COPY tests /workspace/nemo/tests COPY tutorials /workspace/nemo/tutorials # COPY README.rst LICENSE /workspace/nemo/ -# Temporary fix CUDA issue -RUN sed -i "s/, all_gpu_ids//g" /opt/conda/lib/python3.8/site-packages/pytorch_lightning/accelerators/cuda.py -RUN sed -i "s/all_gpu_ids =/\# all_gpu_ids =/g" /opt/conda/lib/python3.8/site-packages/pytorch_lightning/accelerators/cuda.py - RUN printf "#!/bin/bash\njupyter lab --no-browser --allow-root --ip=0.0.0.0" >> start-jupyter.sh && \ chmod +x start-jupyter.sh From 988dedb72c07fd9b4fd77e8cc0fa00d4de78eb2a Mon Sep 17 00:00:00 2001 From: Sandeep Subramanian Date: Tue, 15 Nov 2022 18:24:26 -0800 Subject: [PATCH 184/244] Revert workaround for T5 that sets number of workers to 0 & sync_batch_comm=False (#5420) * Revert workers workaround Signed-off-by: MaximumEntropy * Fix in config Signed-off-by: MaximumEntropy * Fix Signed-off-by: MaximumEntropy Signed-off-by: MaximumEntropy Co-authored-by: Oleksii Kuchaiev --- .../megatron_lm_encoder_decoder_model.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/nemo/collections/nlp/models/language_modeling/megatron_lm_encoder_decoder_model.py b/nemo/collections/nlp/models/language_modeling/megatron_lm_encoder_decoder_model.py index b6d70dfb649e..72c14555a8ad 100644 --- a/nemo/collections/nlp/models/language_modeling/megatron_lm_encoder_decoder_model.py +++ b/nemo/collections/nlp/models/language_modeling/megatron_lm_encoder_decoder_model.py @@ -337,6 +337,7 @@ def training_step(self, batch, batch_idx): tensor_shape=tensor_shape, decoder_sequence_length=decoder_seq_length, dtype=self.autocast_dtype, + sync_batch_comm=self.cfg.get('sync_batch_comm', False), grad_scaler=self.trainer.precision_plugin.scaler if self.cfg.precision == 16 else None, custom_sync_context_handler=custom_sync_context_handler, ) @@ -349,6 +350,7 @@ def training_step(self, batch, batch_idx): tensor_shape=tensor_shape, decoder_sequence_length=decoder_seq_length, dtype=self.autocast_dtype, + sync_batch_comm=self.cfg.get('sync_batch_comm', False), grad_scaler=self.trainer.precision_plugin.scaler if self.cfg.precision == 16 else None, custom_sync_context_handler=custom_sync_context_handler, ) @@ -657,6 +659,7 @@ def validation_step_logits(self, batch, batch_idx): tensor_shape=tensor_shape, decoder_sequence_length=decoder_seq_length, dtype=self.autocast_dtype, + sync_batch_comm=self.cfg.get('sync_batch_comm', False), grad_scaler=self.trainer.precision_plugin.scaler if self.cfg.precision == 16 else None, ) else: @@ -668,6 +671,7 @@ def validation_step_logits(self, batch, batch_idx): tensor_shape=tensor_shape, decoder_sequence_length=decoder_seq_length, dtype=self.autocast_dtype, + sync_batch_comm=self.cfg.get('sync_batch_comm', False), grad_scaler=self.trainer.precision_plugin.scaler if self.cfg.precision == 16 else None, ) @@ -700,6 +704,7 @@ def validation_step(self, batch, batch_idx): tensor_shape=tensor_shape, decoder_sequence_length=decoder_seq_length, dtype=self.autocast_dtype, + sync_batch_comm=self.cfg.get('sync_batch_comm', False), grad_scaler=self.trainer.precision_plugin.scaler if self.cfg.precision == 16 else None, ) else: @@ -711,6 +716,7 @@ def validation_step(self, batch, batch_idx): tensor_shape=tensor_shape, decoder_sequence_length=decoder_seq_length, dtype=self.autocast_dtype, + 
sync_batch_comm=self.cfg.get('sync_batch_comm', False), grad_scaler=self.trainer.precision_plugin.scaler if self.cfg.precision == 16 else None, ) @@ -951,7 +957,7 @@ def setup_validation_data(self, cfg): if hasattr(self, '_validation_ds'): consumed_samples = 0 self._validation_dl = self.build_pretraining_data_loader( - self._validation_ds, consumed_samples, num_workers=0 + self._validation_ds, consumed_samples, num_workers=self._cfg.data.num_workers ) def setup_test_data(self, cfg): @@ -1042,6 +1048,7 @@ def dummy(): tensor_shape=tensor_shape, decoder_sequence_length=encoder_seq_length, dtype=self.autocast_dtype, + sync_batch_comm=self.cfg.get('sync_batch_comm', False), ) else: output_tensor = forward_backward_no_pipelining( @@ -1052,6 +1059,7 @@ def dummy(): tensor_shape=tensor_shape, decoder_sequence_length=encoder_seq_length, dtype=self.autocast_dtype, + sync_batch_comm=self.cfg.get('sync_batch_comm', False), ) if output_tensor: From 8615ab6aed2477a44cd307da24087e997d63391a Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Wed, 16 Nov 2022 05:50:44 -0800 Subject: [PATCH 185/244] Fixed discrepancies --- examples/tts/conf/mixer-tts.yaml | 6 +-- nemo/collections/tts/torch/data.py | 82 ++++++++---------------------- 2 files changed, 23 insertions(+), 65 deletions(-) diff --git a/examples/tts/conf/mixer-tts.yaml b/examples/tts/conf/mixer-tts.yaml index f24835739c91..c66aac76d446 100644 --- a/examples/tts/conf/mixer-tts.yaml +++ b/examples/tts/conf/mixer-tts.yaml @@ -4,9 +4,9 @@ name: Mixer-TTS -train_dataset: "ljspeech_ds/LJSpeech-1.1/train_manifest.json" -validation_datasets: "ljspeech_ds/LJSpeech-1.1/val_manifest.json" -sup_data_path: "ljspeech_ds/LJSpeech-1.1/" +train_dataset: ??? +validation_datasets: ??? +sup_data_path: ??? sup_data_types: [ "align_prior_matrix", "pitch" ] # Default values from librosa.pyin diff --git a/nemo/collections/tts/torch/data.py b/nemo/collections/tts/torch/data.py index f626388f86df..30e771bca4a4 100644 --- a/nemo/collections/tts/torch/data.py +++ b/nemo/collections/tts/torch/data.py @@ -32,7 +32,6 @@ BaseTokenizer, EnglishCharsTokenizer, EnglishPhonemesTokenizer, - IPATokenizer, ) from nemo.collections.tts.torch.helpers import ( BetaBinomialInterpolator, @@ -175,7 +174,7 @@ def __init__( self.text_tokenizer = text_tokenizer self.phoneme_probability = None - if isinstance(self.text_tokenizer, IPATokenizer): + if isinstance(self.text_tokenizer, BaseTokenizer): self.text_tokenizer_pad_id = text_tokenizer.pad self.tokens = text_tokenizer.tokens self.phoneme_probability = getattr(self.text_tokenizer, "phoneme_probability", None) @@ -210,7 +209,7 @@ def __init__( if isinstance(manifest_filepath, str): manifest_filepath = [manifest_filepath] self.manifest_filepath = manifest_filepath - self.lengths = [] + self.lengths = [] # Needed for BucketSampling data = [] total_duration = 0 @@ -242,9 +241,6 @@ def __init__( if self.cache_text: file_info["text_tokens"] = self.text_tokenizer(file_info["normalized_text"]) - if self.cache_text: - file_info["text_tokens"] = self.text_tokenizer(file_info["normalized_text"]) - data.append(file_info) self.lengths.append(os.path.getsize(item["audio_filepath"]) // (2 * hop_length)) if file_info["duration"] is None: @@ -469,10 +465,7 @@ def get_log_mel(self, audio): mel = torch.matmul(self.fb.to(spec.dtype), spec) log_mel = torch.log(torch.clamp(mel, min=torch.finfo(mel.dtype).tiny)) return log_mel - def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result + def __getitem__(self, index): 
sample = self.data[index] audio_path_as_text_id = sample["audio_filepath"].replace("/", "-").split(".")[0] @@ -495,8 +488,7 @@ def __getitem__(self, index): audio, audio_length = features, torch.tensor(features.shape[0]).long() if "text_tokens" in sample: - text = sample["text_tokens"] - text = torch.tensor(text).long() + text = torch.tensor(sample["text_tokens"]).long() text_length = torch.tensor(len(text)).long() else: tokenized = self.text_tokenizer(sample["normalized_text"]) @@ -511,10 +503,7 @@ def __getitem__(self, index): if mel_path is not None and Path(mel_path).exists(): log_mel = torch.load(mel_path) else: - mel_folder = Path(self.sup_data_path) / "mel" - mel_folder.mkdir(exist_ok=True, parents=True) - - mel_path = mel_folder / f"mel{audio_path_as_text_id}.pt" + mel_path = self.log_mel_folder / f"{rel_audio_path_as_text_id}.pt" if mel_path.exists(): log_mel = torch.load(mel_path) @@ -580,10 +569,7 @@ def __getitem__(self, index): # Load energy if needed energy, energy_length = None, None if Energy in self.sup_data_types_set: - energy_folder = Path(self.sup_data_path) / "energy" - energy_folder.mkdir(exist_ok=True, parents=True) - - energy_path = energy_folder / f"energy{audio_path_as_text_id}.pt" + energy_path = self.energy_folder / f"{rel_audio_path_as_text_id}.pt" if energy_path.exists(): energy = torch.load(energy_path).float() @@ -859,12 +845,12 @@ def _collate_fn(self, batch): class VocoderDataset(Dataset): def __init__( self, - manifest_filepath: str, + manifest_filepath: Union[str, Path, List[str], List[Path]], sample_rate: int, n_segments: Optional[int] = None, - min_duration: Optional[float] = None, max_duration: Optional[float] = None, - ignore_file: Optional[str] = None, + min_duration: Optional[float] = None, + ignore_file: Optional[Union[str, Path]] = None, trim: Optional[bool] = False, load_precomputed_mel: bool = False, hop_length: Optional[int] = None, @@ -904,8 +890,12 @@ def __init__( if n_segments is None: raise ValueError("n_segments must be specified when load_precomputed_mel is True") - self.data = [] - audio_files = [] + # Initialize and read manifest file(s), filter out data by duration and ignore_file + if isinstance(manifest_filepath, str): + manifest_filepath = [manifest_filepath] + self.manifest_filepath = manifest_filepath + + data = [] total_duration = 0 for manifest_file in self.manifest_filepath: with open(Path(manifest_file).expanduser(), 'r') as f: @@ -922,7 +912,7 @@ def __init__( "duration": item["duration"] if "duration" in item else None, } - audio_files.append(file_info) + data.append(file_info) if file_info["duration"] is None: logging.info( @@ -933,45 +923,14 @@ def __init__( if total_duration is not None: total_duration += item["duration"] - logging.info(f"Loaded dataset with {len(audio_files)} files.") + logging.info(f"Loaded dataset with {len(data)} files.") if total_duration is not None: logging.info(f"Dataset contains {total_duration / 3600:.2f} hours.") - if ignore_file: - logging.info(f"using {ignore_file} to prune dataset.") - with open(Path(ignore_file).expanduser(), "rb") as f: - wavs_to_ignore = set(pickle.load(f)) - - pruned_duration = 0 if total_duration is not None else None - pruned_items = 0 - for item in audio_files: - audio_path = item['audio_filepath'] - audio_id = Path(audio_path).stem - - # Prune data according to min/max_duration & the ignore file - if total_duration is not None: - if (min_duration and item["duration"] < min_duration) or ( - max_duration and item["duration"] > max_duration - ): - pruned_duration += 
item["duration"] - pruned_items += 1 - continue - - if ignore_file and (audio_id in wavs_to_ignore): - pruned_items += 1 - pruned_duration += item["duration"] - wavs_to_ignore.remove(audio_id) - continue - - self.data.append(item) - - logging.info(f"Pruned {pruned_items} files. Final dataset contains {len(self.data)} files") - if pruned_duration is not None: - logging.info( - f"Pruned {pruned_duration / 3600:.2f} hours. Final dataset contains " - f"{(total_duration - pruned_duration) / 3600:.2f} hours." - ) + self.data = TTSDataset.filter_files(data, ignore_file, min_duration, max_duration, total_duration) + self.base_data_dir = get_base_dir([item["audio_filepath"] for item in self.data]) + # Initialize audio and mel related parameters self.load_precomputed_mel = load_precomputed_mel self.featurizer = WaveformFeaturizer(sample_rate=sample_rate) self.sample_rate = sample_rate @@ -1028,7 +987,6 @@ def __getitem__(self, index): def __len__(self): return len(self.data) - class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler): """ Maintain similar input lengths in a batch. From f682acf627e5b19502e71a902840492e4ee0ecfc Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Wed, 16 Nov 2022 06:12:01 -0800 Subject: [PATCH 186/244] updated Jenkisfile --- Jenkinsfile | 165 +--------------------------------------------------- 1 file changed, 1 insertion(+), 164 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index c235b9951a58..2dd4a6fc87dd 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -1525,11 +1525,8 @@ pipeline { stage('L2: BERT Text Classification') { when { anyOf { -<<<<<<< HEAD - branch 'r1.9.0' - changeRequest target: 'r1.9.0' -======= branch 'r1.13.0' + changeRequest target: 'r1.13.0' failFast true parallel { steps { @@ -1735,13 +1732,8 @@ pipeline { stage('L2: Intent and Slot Classification Tasks') { when { anyOf { -<<<<<<< HEAD - branch 'r1.9.0' - changeRequest target: 'r1.9.0' -======= branch 'r1.13.0' changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 } } failFast true @@ -1780,13 +1772,8 @@ pipeline { // stage('L2: Model Parallel Size 2 Megatron Text Classification') { // when { // anyOf{ -<<<<<<< HEAD - // branch 'r1.9.0' - // changeRequest target: 'r1.9.0' -======= // branch 'r1.13.0' // changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 // } // } // failFast true @@ -1814,13 +1801,8 @@ pipeline { // stage('L2: Model Parallel Size 2 Megatron Autoresume') { // when { // anyOf{ -<<<<<<< HEAD - // branch 'r1.9.0' - // changeRequest target: 'r1.9.0' -======= // branch 'r1.13.0' // changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 // } // } // failFast true @@ -1850,13 +1832,8 @@ pipeline { // stage('L2: Model Parallel Size 2 Megatron Evaluation from .nemo') { // when { // anyOf{ -<<<<<<< HEAD - // branch 'r1.9.0' - // changeRequest target: 'r1.9.0' -======= // branch 'r1.13.0' // changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 // } // } // failFast true @@ -1876,13 +1853,8 @@ pipeline { // stage('L2: Model Parallel Size 2 Megatron Train from .nemo') { // when { // anyOf{ -<<<<<<< HEAD - // branch 'r1.9.0' - // changeRequest target: 'r1.9.0' -======= // branch 'r1.13.0' // changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 // } // } // failFast true @@ -1904,13 +1876,8 @@ pipeline { stage('L2: Parallel NLP Examples 2') { when { anyOf { -<<<<<<< HEAD - branch 'r1.9.0' - changeRequest target: 'r1.9.0' -======= branch 'r1.13.0' changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 } } failFast true @@ -2034,13 +2001,8 @@ pipeline { 
stage('Punctuation & Capitalization tarred dataset') { when { anyOf { -<<<<<<< HEAD - branch 'r1.9.0' - changeRequest target: 'r1.9.0' -======= branch 'r1.13.0' changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 } } failFast true @@ -2206,13 +2168,8 @@ pipeline { stage('Punctuation & Capitalization inference') { when { anyOf { -<<<<<<< HEAD - branch 'r1.9.0' - changeRequest target: 'r1.9.0' -======= branch 'r1.13.0' changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 } } failFast true @@ -2237,13 +2194,8 @@ pipeline { stage('L2: Parallel Pretraining BERT pretraining from Text/Preprocessed') { when { anyOf { -<<<<<<< HEAD - branch 'r1.9.0' - changeRequest target: 'r1.9.0' -======= branch 'r1.13.0' changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 } } failFast true @@ -2304,13 +2256,8 @@ pipeline { stage('L2: Entity Linking') { when { anyOf { -<<<<<<< HEAD - branch 'r1.9.0' - changeRequest target: 'r1.9.0' -======= branch 'r1.13.0' changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 } } failFast true @@ -2337,13 +2284,8 @@ pipeline { stage('L2: NMT Attention is All You Need Training') { when { anyOf { -<<<<<<< HEAD - branch 'r1.9.0' - changeRequest target: 'r1.9.0' -======= branch 'r1.13.0' changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 } } failFast true @@ -2465,13 +2407,8 @@ pipeline { stage('L2: NMT Attention is All You Need Inference') { when { anyOf { -<<<<<<< HEAD - branch 'r1.9.0' - changeRequest target: 'r1.9.0' -======= branch 'r1.13.0' changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 } } failFast true @@ -2540,13 +2477,8 @@ pipeline { stage('L2: NMT with HuggingFace') { when { anyOf { -<<<<<<< HEAD - branch 'r1.9.0' - changeRequest target: 'r1.9.0' -======= branch 'r1.13.0' changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 } } failFast true @@ -2622,13 +2554,8 @@ pipeline { stage('L2: NMT Tarred Dataset Creation') { when { anyOf { -<<<<<<< HEAD - branch 'r1.9.0' - changeRequest target: 'r1.9.0' -======= branch 'r1.13.0' changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 } } failFast true @@ -2776,13 +2703,8 @@ pipeline { // stage('L2: NMT Bottleneck Fallback') { // when { // anyOf { -<<<<<<< HEAD - // branch 'r1.9.0' - // changeRequest target: 'r1.9.0' -======= // branch 'r1.13.0' // changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 // } // } // failFast true @@ -2828,13 +2750,8 @@ pipeline { // stage('L2: NMT Bottleneck Architecture') { // when { // anyOf { -<<<<<<< HEAD - // branch 'r1.9.0' - // changeRequest target: 'r1.9.0' -======= // branch 'r1.13.0' // changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 // } // } // failFast true @@ -2916,13 +2833,8 @@ pipeline { // stage('L2: NMT Bottleneck LVM') { // when { // anyOf { -<<<<<<< HEAD - // branch 'r1.9.0' - // changeRequest target: 'r1.9.0' -======= // branch 'r1.13.0' // changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 // } // } // failFast true @@ -3004,13 +2916,8 @@ pipeline { stage('L2: Megatron Bert Pretraining and Resume Training') { when { anyOf { -<<<<<<< HEAD - branch 'r1.9.0' - changeRequest target: 'r1.9.0' -======= branch 'r1.13.0' changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 } } failFast true @@ -3234,13 +3141,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: BioMegatron Bert NER Task') { when { anyOf { -<<<<<<< HEAD - branch 'r1.9.0' - changeRequest target: 'r1.9.0' -======= branch 'r1.13.0' changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 } } failFast true @@ -3257,13 +3159,8 @@ 
assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron GPT Pretraining and Resume Training TP=2') { when { anyOf { -<<<<<<< HEAD - branch 'r1.9.0' - changeRequest target: 'r1.9.0' -======= branch 'r1.13.0' changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 } } failFast true @@ -3334,13 +3231,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron GPT Pretraining and Resume Training PP=2') { when { anyOf { -<<<<<<< HEAD - branch 'r1.9.0' - changeRequest target: 'r1.9.0' -======= branch 'r1.13.0' changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 } } failFast true @@ -3411,13 +3303,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron GPT Eval') { when { anyOf { -<<<<<<< HEAD - branch 'r1.9.0' - changeRequest target: 'r1.9.0' -======= branch 'r1.13.0' changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 } } failFast true @@ -3524,13 +3411,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' // stage('L2: Megatron GPT Convert from Megatron-LM checkpoing and Eval') { // when { // anyOf { -<<<<<<< HEAD - // branch 'r1.9.0' - // changeRequest target: 'r1.9.0' -======= // branch 'r1.13.0' // changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 // } // } // failFast true @@ -3556,13 +3438,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron Change Partitions') { when { anyOf { -<<<<<<< HEAD - branch 'r1.9.0' - changeRequest target: 'r1.9.0' -======= branch 'r1.13.0' changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 } } failFast true @@ -3600,13 +3477,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron T5 Pretraining and Resume Training TP=2') { when { anyOf { -<<<<<<< HEAD - branch 'r1.9.0' - changeRequest target: 'r1.9.0' -======= branch 'r1.13.0' changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 } } failFast true @@ -3701,13 +3573,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron T5 Pretraining and Resume Training PP=2') { when { anyOf { -<<<<<<< HEAD - branch 'r1.9.0' - changeRequest target: 'r1.9.0' -======= branch 'r1.13.0' changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 } } failFast true @@ -3955,13 +3822,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron T5 Eval') { when { anyOf { -<<<<<<< HEAD - branch 'r1.9.0' - changeRequest target: 'r1.9.0' -======= branch 'r1.13.0' changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 } } failFast true @@ -3977,13 +3839,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron BART Pretraining and Resume Training, TP=2') { when { anyOf { -<<<<<<< HEAD - branch 'r1.9.0' - changeRequest target: 'r1.9.0' -======= branch 'r1.13.0' changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 } } failFast true @@ -4051,13 +3908,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron BART Pretraining and Resume Training, PP=2') { when { anyOf { -<<<<<<< HEAD - branch 'r1.9.0' - changeRequest target: 'r1.9.0' -======= branch 'r1.13.0' changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 } } failFast true @@ -4129,13 +3981,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron T5 GLUE/XNLI Finetuning') { when { anyOf { -<<<<<<< HEAD - branch 'r1.9.0' - changeRequest target: 'r1.9.0' -======= 
branch 'r1.13.0' changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 } } failFast true @@ -4207,13 +4054,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: TTS Fast dev runs 1') { when { anyOf { -<<<<<<< HEAD - branch 'r1.9.0' - changeRequest target: 'r1.9.0' -======= branch 'r1.13.0' changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 } } parallel { @@ -4323,13 +4165,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L??: Speech Checkpoints tests') { when { anyOf { -<<<<<<< HEAD - branch 'r1.9.0' - changeRequest target: 'r1.9.0' -======= branch 'r1.13.0' changeRequest target: 'r1.13.0' ->>>>>>> origin/r1.13.0 } } failFast true From 719c55fa09462981d96d7ed537f307c839b175a8 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Wed, 16 Nov 2022 06:19:51 -0800 Subject: [PATCH 187/244] updated Jenkisfile --- Jenkinsfile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Jenkinsfile b/Jenkinsfile index 2dd4a6fc87dd..31364fe3ea4b 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -1527,8 +1527,11 @@ pipeline { anyOf { branch 'r1.13.0' changeRequest target: 'r1.13.0' + } + } failFast true parallel { + stage ('Text Classification with BERT Test') { steps { sh 'cd examples/nlp/text_classification && \ python text_classification_with_bert.py \ From ad67753333e3f00eae66c5e657fa1c56f4a001d7 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Wed, 16 Nov 2022 06:36:22 -0800 Subject: [PATCH 188/244] Cleaning --- examples/tts/conf/vits.yaml | 8 +- examples/tts/conf/vits_44100.yaml | 10 +- nemo/collections/tts/torch/data.py | 5 +- nemo/collections/tts/torch/tts_tokenizers.py | 456 ------------------ nemo_text_processing/g2p/data/data_utils.py | 3 +- reinstall.sh | 2 +- requirements/requirements.txt | 4 +- requirements/requirements_lightning.txt | 2 +- setup.py | 15 - .../Speaker_Diarization_Inference.ipynb | 5 - tutorials/tts/FastPitch_Finetuning.ipynb | 4 - .../tts/Inference_DurationPitchControl.ipynb | 4 - tutorials/tts/Inference_ModelSelect.ipynb | 4 - 13 files changed, 9 insertions(+), 513 deletions(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index 0e2894dc6831..2ba641ed7fac 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -207,16 +207,10 @@ trainer: exp_manager: exp_dir: ??? name: ${name} - create_tensorboard_logger: false + create_tensorboard_logger: true create_checkpoint_callback: true checkpoint_callback_params: monitor: loss_gen_all mode: min - create_wandb_logger: true - wandb_logger_kwargs: - name: ??? - project: ${name} - entity: nvidia - resume: "allow" resume_if_exists: false resume_ignore_no_checkpoint: false diff --git a/examples/tts/conf/vits_44100.yaml b/examples/tts/conf/vits_44100.yaml index 0915844ac945..2cdb727949ac 100644 --- a/examples/tts/conf/vits_44100.yaml +++ b/examples/tts/conf/vits_44100.yaml @@ -203,18 +203,12 @@ trainer: check_val_every_n_epoch: 1 exp_manager: - exp_dir: ../exps/vits_hifitts_fp16_local + exp_dir: ??? 
name: ${name} - create_tensorboard_logger: false + create_tensorboard_logger: true create_checkpoint_callback: true checkpoint_callback_params: monitor: loss_gen_all mode: min - create_wandb_logger: true - wandb_logger_kwargs: - name: vits_hifitts_fp16_local - project: ${name} - entity: nvidia - resume: "allow" resume_if_exists: false resume_ignore_no_checkpoint: false diff --git a/nemo/collections/tts/torch/data.py b/nemo/collections/tts/torch/data.py index 30e771bca4a4..9e257c8d7d2e 100644 --- a/nemo/collections/tts/torch/data.py +++ b/nemo/collections/tts/torch/data.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import functools import json import math import os @@ -54,7 +53,6 @@ Voiced_mask, WithLens, ) - from nemo.core.classes import Dataset from nemo.utils import logging @@ -243,6 +241,7 @@ def __init__( data.append(file_info) self.lengths.append(os.path.getsize(item["audio_filepath"]) // (2 * hop_length)) + if file_info["duration"] is None: logging.info( "Not all audio files have duration information. Duration logging will be disabled." @@ -453,7 +452,6 @@ def add_speaker_id(self, **kwargs): def get_spec(self, audio): with torch.cuda.amp.autocast(enabled=False): spec = self.stft(audio) - if spec.dtype in [torch.cfloat, torch.cdouble]: spec = torch.view_as_real(spec) spec = torch.sqrt(spec.pow(2).sum(-1) + EPSILON) @@ -468,7 +466,6 @@ def get_log_mel(self, audio): def __getitem__(self, index): sample = self.data[index] - audio_path_as_text_id = sample["audio_filepath"].replace("/", "-").split(".")[0] # Let's keep audio name and all internal directories in rel_audio_path_as_text_id to avoid any collisions rel_audio_path = Path(sample["audio_filepath"]).relative_to(self.base_data_dir).with_suffix("") diff --git a/nemo/collections/tts/torch/tts_tokenizers.py b/nemo/collections/tts/torch/tts_tokenizers.py index f3ba5c500664..0cac295b6c06 100644 --- a/nemo/collections/tts/torch/tts_tokenizers.py +++ b/nemo/collections/tts/torch/tts_tokenizers.py @@ -12,462 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import abc -import itertools -import string -from contextlib import contextmanager -from typing import List -import functools - -import re - -from nemo.collections.tts.torch.de_utils import german_text_preprocessing -from nemo.collections.tts.torch.en_utils import english_text_preprocessing -from nemo.utils import logging - - -class BaseTokenizer(abc.ABC): - PAD, BLANK, OOV = '', '', '' - - def __init__(self, tokens, *, pad=PAD, blank=BLANK, oov=OOV, sep='', add_blank_at=None): - """Abstract class for creating an arbitrary tokenizer to convert string to list of int tokens. - Args: - tokens: List of tokens. - pad: Pad token as string. - blank: Blank token as string. - oov: OOV token as string. - sep: Separation token as string. - add_blank_at: Add blank to labels in the specified order ("last") or after tokens (any non None), - if None then no blank in labels. 
- """ - super().__init__() - - tokens = list(tokens) - self.pad, tokens = 0, [pad] + tokens # Padding - - if add_blank_at is not None: - self.blank, tokens = len(tokens), tokens + [blank] # Reserved for blank from asr-model - else: - # use add_blank_at=None only for ASR where blank is added automatically, disable blank here - self.blank = None - - self.oov, tokens = len(tokens), tokens# + [oov] # Out Of Vocabulary - - if add_blank_at == "last": - tokens[-1], tokens[-2] = tokens[-2], tokens[-1] - self.oov, self.blank = self.blank, self.oov - - self.tokens = tokens - self.sep = sep - - self._util_ids = {self.pad, self.blank, self.oov} - self._token2id = {l: i for i, l in enumerate(tokens)} - self._id2token = tokens - - def __call__(self, text: str) -> List[int]: - return self.encode(text) - - @abc.abstractmethod - def encode(self, text: str) -> List[int]: - """Turns str text into int tokens.""" - pass - - def decode(self, tokens: List[int]) -> str: - """Turns ints tokens into str text.""" - return self.sep.join(self._id2token[t] for t in tokens if t not in self._util_ids) - - -class BaseCharsTokenizer(BaseTokenizer): - # fmt: off - PUNCT_LIST = ( # Derived from LJSpeech and "/" additionally - ',', '.', '!', '?', '-', - ':', ';', '/', '"', '(', - ')', '[', ']', '{', '}', - ) - # fmt: on - - def __init__( - self, - chars, - punct=True, - apostrophe=True, - add_blank_at=None, - pad_with_space=False, - non_default_punct_list=None, - text_preprocessing_func=lambda x: x, - ): - """Base class for char-based tokenizer. - Args: - chars: string that represents all possible characters. - punct: Whether to reserve grapheme for basic punctuation or not. - apostrophe: Whether to use apostrophe or not. - add_blank_at: Add blank to labels in the specified order ("last") or after tokens (any non None), - if None then no blank in labels. - pad_with_space: Whether to pad text with spaces at the beginning and at the end or not. - non_default_punct_list: List of punctuation marks which will be used instead default. - text_preprocessing_func: Text preprocessing function for correct execution of the tokenizer. - """ - - tokens = [] - self.space, tokens = len(tokens), tokens + [' '] # Space - tokens.extend(chars) - if apostrophe: - tokens.append("'") # Apostrophe for saving "don't" and "Joe's" - - if punct: - if non_default_punct_list is not None: - self.PUNCT_LIST = non_default_punct_list - tokens.extend(self.PUNCT_LIST) - - super().__init__(tokens, add_blank_at=add_blank_at) - - self.punct = punct - self.pad_with_space = pad_with_space - - self.text_preprocessing_func = text_preprocessing_func - - def encode(self, text): - """See base class.""" - cs, space, tokens = [], self.tokens[self.space], set(self.tokens) - - text = self.text_preprocessing_func(text) - for c in text: - # Add space if last one isn't one - if c == space and len(cs) > 0 and cs[-1] != space: - cs.append(c) - # Add next char - elif (c.isalnum() or c == "'") and c in tokens: - cs.append(c) - # Add punct - elif (c in self.PUNCT_LIST) and self.punct: - cs.append(c) - # Warn about unknown char - elif c != space: - logging.warning(f"Text: [{text}] contains unknown char: [{c}]. 
Symbol will be skipped.") - - # Remove trailing spaces - while cs[-1] == space: - cs.pop() - - if self.pad_with_space: - cs = [space] + cs + [space] - - return [self._token2id[p] for p in cs] - - -class EnglishCharsTokenizer(BaseCharsTokenizer): - def __init__( - self, - punct=True, - apostrophe=True, - add_blank_at=None, - pad_with_space=False, - non_default_punct_list=None, - text_preprocessing_func=english_text_preprocessing, - ): - """English char-based tokenizer. - Args: - punct: Whether to reserve grapheme for basic punctuation or not. - apostrophe: Whether to use apostrophe or not. - add_blank_at: Add blank to labels in the specified order ("last") or after tokens (any non None), - if None then no blank in labels. - pad_with_space: Whether to pad text with spaces at the beginning and at the end or not. - non_default_punct_list: List of punctuation marks which will be used instead default. - text_preprocessing_func: Text preprocessing function for correct execution of the tokenizer. - Basically, it replaces all non-unicode characters with unicode ones and apply lower() function. - """ - super().__init__( - chars=string.ascii_lowercase, - punct=punct, - apostrophe=apostrophe, - add_blank_at=add_blank_at, - pad_with_space=pad_with_space, - non_default_punct_list=non_default_punct_list, - text_preprocessing_func=text_preprocessing_func, - ) - - -class GermanCharsTokenizer(BaseCharsTokenizer): - # fmt: off - PUNCT_LIST = ( # Derived from LJSpeech and "/" additionally - ',', '.', '!', '?', '-', - ':', ';', '/', '"', '(', - ')', '[', ']', '{', '}', - ) - # fmt: on - - def __init__( - self, - punct=True, - apostrophe=True, - add_blank_at=None, - pad_with_space=False, - non_default_punct_list=None, - text_preprocessing_func=german_text_preprocessing, - ): - """Deutsch char-based tokenizer. - Args: - punct: Whether to reserve grapheme for basic punctuation or not. - apostrophe: Whether to use apostrophe or not. - add_blank_at: Add blank to labels in the specified order ("last") or after tokens (any non None), - if None then no blank in labels. - pad_with_space: Whether to pad text with spaces at the beginning and at the end or not. - non_default_punct_list: List of punctuation marks which will be used instead default. - text_preprocessing_func: Text preprocessing function for correct execution of the tokenizer. - Currently, it only applies lower() function. - """ - - de_alphabet = "abcdefghijklmnopqrstuvwxyzäöüß" - super().__init__( - chars=de_alphabet, - punct=punct, - apostrophe=apostrophe, - add_blank_at=add_blank_at, - pad_with_space=pad_with_space, - non_default_punct_list=non_default_punct_list, - text_preprocessing_func=text_preprocessing_func, - ) - - -class EnglishPhonemesTokenizer(BaseTokenizer): - # fmt: off - PUNCT_LIST = ( # Derived from LJSpeech and "/" additionally - ',', '.', '!', '?', '-', - ':', ';', '/', '"', '(', - ')', '[', ']', '{', '}', - ) - VOWELS = ( - 'AA', 'AE', 'AH', 'AO', 'AW', - 'AY', 'EH', 'ER', 'EY', 'IH', - 'IY', 'OW', 'OY', 'UH', 'UW', - ) - CONSONANTS = ( - 'B', 'CH', 'D', 'DH', 'F', 'G', - 'HH', 'JH', 'K', 'L', 'M', 'N', - 'NG', 'P', 'R', 'S', 'SH', 'T', - 'TH', 'V', 'W', 'Y', 'Z', 'ZH', - ) - # fmt: on - - def __init__( - self, - g2p, - punct=True, - non_default_punct_list=None, - stresses=False, - chars=False, - *, - space=' ', - silence=None, - apostrophe=True, - oov=BaseTokenizer.OOV, - sep='|', # To be able to distinguish between 2/3 letters codes. 
- add_blank_at=None, - pad_with_space=False, - text_preprocessing_func=lambda text: english_text_preprocessing(text, lower=False), - ): - """English phoneme-based tokenizer. - Args: - g2p: Grapheme to phoneme module. - punct: Whether to reserve grapheme for basic punctuation or not. - non_default_punct_list: List of punctuation marks which will be used instead default. - stresses: Whether to use phonemes codes with stresses (0-2) or not. - chars: Whether to additionally use chars together with phonemes. It is useful if g2p module can return chars too. - space: Space token as string. - silence: Silence token as string (will be disabled if it is None). - apostrophe: Whether to use apostrophe or not. - oov: OOV token as string. - sep: Separation token as string. - add_blank_at: Add blank to labels in the specified order ("last") or after tokens (any non None), - if None then no blank in labels. - pad_with_space: Whether to pad text with spaces at the beginning and at the end or not. - text_preprocessing_func: Text preprocessing function for correct execution of the tokenizer. - Basically, it replaces all non-unicode characters with unicode ones. - Note that lower() function shouldn't applied here, because text can contains phonemes (it will be handled by g2p). - """ - self.phoneme_probability = None - if hasattr(g2p, "phoneme_probability"): - self.phoneme_probability = g2p.phoneme_probability - - self.phoneme_probability = None - if hasattr(g2p, "phoneme_probability"): - self.phoneme_probability = g2p.phoneme_probability - tokens = [] - self.space, tokens = len(tokens), tokens + [space] # Space - - if silence is not None: - self.silence, tokens = len(tokens), tokens + [silence] # Silence - - tokens.extend(self.CONSONANTS) - vowels = list(self.VOWELS) - - if stresses: - vowels = [f'{p}{s}' for p, s in itertools.product(vowels, (0, 1, 2))] - tokens.extend(vowels) - - if chars or self.phoneme_probability is not None: - if not chars: - logging.warning( - "phoneme_probability was not None, characters will be enabled even though " - "chars was set to False." - ) - tokens.extend(string.ascii_lowercase) - - if apostrophe: - tokens.append("'") # Apostrophe - - if punct: - if non_default_punct_list is not None: - self.PUNCT_LIST = non_default_punct_list - tokens.extend(self.PUNCT_LIST) - - super().__init__(tokens, oov=oov, sep=sep, add_blank_at=add_blank_at) - - self.chars = chars if self.phoneme_probability is None else True - self.punct = punct - self.stresses = stresses - self.pad_with_space = pad_with_space - - self.text_preprocessing_func = text_preprocessing_func - self.g2p = g2p - - def encode(self, text): - """See base class.""" - ps, space, tokens = [], self.tokens[self.space], set(self.tokens) - - text = self.text_preprocessing_func(text) - g2p_text = self.g2p(text) # TODO: handle infer - - for p in g2p_text: # noqa - # Remove stress - if p.isalnum() and len(p) == 3 and not self.stresses: - p = p[:2] - - # Add space if last one isn't one - if p == space and len(ps) > 0 and ps[-1] != space: - ps.append(p) - # Add next phoneme or char (if chars=True) - elif (p.isalnum() or p == "'") and p in tokens: - ps.append(p) - # Add punct - elif (p in self.PUNCT_LIST) and self.punct: - ps.append(p) - # Warn about unknown char/phoneme - elif p != space: - logging.warning( - f"Text: [{''.join(g2p_text)}] contains unknown char/phoneme: [{p}]. Original text: [{text}]. Symbol will be skipped." 
-                )
-
-        # Remove trailing spaces
-        while ps[-1] == space:
-            ps.pop()
-
-        if self.pad_with_space:
-            ps = [space] + ps + [space]
-
-        return [self._token2id[p] for p in ps]
-
-
-class IPAPhonemesTokenizer(BaseTokenizer):
-    # fmt: off
-
-    # _punctuation = ';:,.!?¡¿—…"«»“”#()-~[]|/'
-    # _letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
-    # _letters_ipa = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻàãäåèéíîôõúûüăēĕĝğĩĭŏŝšũŭžǐǝǧʻˀ˥˦˧˨˩̝̞̠̥̪̃̆̊̍̚εابرسشصفلمهوᵐᵑᵝṣẽ​‍‎’⁠ⁿっゎッヮヶ�"
-    PAD = '_'
-    # fmt: on
-
-    _punctuation = ';:,.!?¡¿—…"«»“”'
-    _letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
-    _letters_ipa = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ"
-
-    PUNCT_LIST = [p for p in _punctuation]
-
-    def __init__(
-        self,
-        g2p,
-        punct=True,
-        non_default_punct_list=None,
-        stresses=False,
-        chars=False,
-        *,
-        space=' ',
-        silence=None,
-        apostrophe=True,
-        oov=BaseTokenizer.OOV,
-        sep='|',  # To be able to distinguish between 2/3 letter codes.
-        add_blank_at=None,
-        pad_with_space=False,
-        text_preprocessing_func=functools.partial(english_text_preprocessing, lower=False)
-    ):
-        """English phoneme-based tokenizer.
-        Args:
-            g2p: Grapheme to phoneme module.
-            punct: Whether to reserve graphemes for basic punctuation or not.
-            non_default_punct_list: List of punctuation marks which will be used instead of the default ones.
-            stresses: Whether to use phoneme codes with stresses (0-2) or not.
-            chars: Whether to additionally use chars together with phonemes. It is useful if the g2p module can return chars too.
-            space: Space token as string.
-            silence: Silence token as string (will be disabled if it is None).
-            apostrophe: Whether to use apostrophe or not.
-            oov: OOV token as string.
-            sep: Separation token as string.
-            add_blank_at: Add blank to labels in the specified order ("last") or after tokens (any non None),
-                if None then no blank in labels.
-            pad_with_space: Whether to pad text with spaces at the beginning and at the end or not.
-            text_preprocessing_func: Text preprocessing function for correct execution of the tokenizer.
-                Basically, it replaces all non-unicode characters with unicode ones.
-                Note that the lower() function shouldn't be applied here, because the text can contain phonemes (they will be handled by g2p).
-        """
-        self.phoneme_probability = None
-        if hasattr(g2p, "phoneme_probability"):
-            self.phoneme_probability = g2p.phoneme_probability
-        tokens = []
-
-        if punct:
-            tokens.extend(self.PUNCT_LIST)
-
-        self.space, tokens = len(tokens), tokens + [space]  # Space
-
-        if silence is not None:
-            self.silence, tokens = len(tokens), tokens + [silence]  # Silence
-        tokens.extend([l for l in self._letters])
-        tokens.extend([l for l in self._letters_ipa])
-
-        super().__init__(tokens, oov=oov, pad=self.PAD, sep=sep, add_blank_at=add_blank_at)
-
-        self.chars = chars
-        self.punct = punct
-        self.stresses = stresses
-        self.pad_with_space = pad_with_space
-
-        self.text_preprocessing_func = text_preprocessing_func
-        self.g2p = g2p
-
-    def encode(self, text):
-        """See base class."""
-
-        text = self.text_preprocessing_func(text)
-
-        g2p_text = self.g2p(text)
-
-        # Remove trailing spaces
-        # while ps[-1] == space:
-        #     ps.pop()
-
-        if self.pad_with_space:
-            g2p_text = self.tokens[self.space] + g2p_text + self.tokens[self.space]
-
-        return [self._token2id[p] for p in g2p_text]
-    @contextmanager
-    def set_phone_prob(self, prob):
-        if hasattr(self.g2p, "phoneme_probability"):
-            self.g2p.phoneme_probability = prob
-        try:
-            yield
-        finally:
-            if hasattr(self.g2p, "phoneme_probability"):
-                self.g2p.phoneme_probability = self.phoneme_probability
# TODO (xueyang): deprecate this file since no other places import modules from here anymore. However,
# all checkpoints uploaded in NGC used this path, so all NGC checkpoint paths need to be updated as well.
from nemo.collections.common.tokenizers.text_to_speech.tts_tokenizers import (
diff --git a/nemo_text_processing/g2p/data/data_utils.py b/nemo_text_processing/g2p/data/data_utils.py
index 39bc0cbcbd07..a3ae92ccaf9d 100644
--- a/nemo_text_processing/g2p/data/data_utils.py
+++ b/nemo_text_processing/g2p/data/data_utils.py
@@ -30,7 +30,6 @@
 }
 SYNOGLYPH2ASCII = {g: asc for asc, glyphs in _synoglyphs.items() for g in glyphs}
-
 # Example of parsing by groups via _WORDS_RE.
 # Groups:
 # 1st group -- valid english words,
@@ -103,7 +102,7 @@ def remove_punctuation(text: str, exclude: List[str] = None):
     return text.strip()


-def english_text_preprocessing(text, lower=True, abbreviations=True):
+def english_text_preprocessing(text, lower=True):
     text = unicode(text)
     text = ''.join(char for char in unicodedata.normalize('NFD', text) if unicodedata.category(char) != 'Mn')
     text = ''.join(char if char not in SYNOGLYPH2ASCII else SYNOGLYPH2ASCII[char] for char in text)
diff --git a/reinstall.sh b/reinstall.sh
index e09c7f385314..2711af17cfd9 100755
--- a/reinstall.sh
+++ b/reinstall.sh
@@ -17,7 +17,7 @@
 ${PIP} uninstall -y nemo_tts
 ${PIP} uninstall -y nemo_simple_gan
 ${PIP} uninstall -y nemo_cv
-${PIP} install -U setuptools==59.5.0
+${PIP} install -U setuptools
 if [ !
-z "${NVIDIA_PYTORCH_VERSION}" ] diff --git a/requirements/requirements.txt b/requirements/requirements.txt index 753f77bb520a..8d5860cfe80f 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -1,8 +1,8 @@ -numpy==1.21 +numpy>=1.21 setuptools==59.5.0 onnx>=1.7.0 python-dateutil -torch>=1.12 +torch wrapt ruamel.yaml scikit-learn diff --git a/requirements/requirements_lightning.txt b/requirements/requirements_lightning.txt index 180b58b1017b..259bd1289dc7 100644 --- a/requirements/requirements_lightning.txt +++ b/requirements/requirements_lightning.txt @@ -1,4 +1,4 @@ -pytorch-lightning>=1.7.0, <=1.7.7 +pytorch-lightning>=1.7.0,<=1.7.7 torchmetrics>=0.4.1rc0 transformers>=4.0.1,<=4.21.2 webdataset>=0.1.48,<=0.1.62 diff --git a/setup.py b/setup.py index b96975e1098c..592cd4b415cb 100644 --- a/setup.py +++ b/setup.py @@ -25,11 +25,6 @@ from distutils import log as distutils_log from itertools import chain -# TODO: need to discuss how to do it correctly -# from distutils.core import setup -# from Cython.Build import cythonize -# import numpy - import setuptools spec = importlib.util.spec_from_file_location('package_info', 'nemo/package_info.py') @@ -141,16 +136,6 @@ def req_file(filename, folder="requirements"): tests_requirements = extras_require["test"] -############################################################################### -# Monotonic Align # -# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% # - -# TODO: need to discuss how to do it correctly -# setup( -# name='monotonic_align', -# ext_modules=cythonize("nemo/collections/tts/modules/monotonic_align/core.pyx"), -# include_dirs=[numpy.get_include()], -# ) ############################################################################### # Code style checkers # diff --git a/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb b/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb index c2df61471ebf..c5e1eb3fb95f 100644 --- a/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb +++ b/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb @@ -963,10 +963,6 @@ "metadata": { "collapsed": false }, -<<<<<<< HEAD - "nbformat": 4, - "nbformat_minor": 4 -======= "source": [] } }, @@ -978,5 +974,4 @@ }, "nbformat": 4, "nbformat_minor": 4 ->>>>>>> origin/main } diff --git a/tutorials/tts/FastPitch_Finetuning.ipynb b/tutorials/tts/FastPitch_Finetuning.ipynb index 1cdd7a625394..039c87064879 100755 --- a/tutorials/tts/FastPitch_Finetuning.ipynb +++ b/tutorials/tts/FastPitch_Finetuning.ipynb @@ -57,11 +57,7 @@ "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", "4. Run this cell to set up dependencies.\n", "\"\"\"\n", -<<<<<<< HEAD - "BRANCH = 'r1.9.0'\n", -======= "BRANCH = 'r1.13.0'\n", ->>>>>>> origin/r1.13.0 "# # If you're using Google Colab and not running locally, uncomment and run this cell.\n", "# !apt-get install sox libsndfile1 ffmpeg\n", "# !pip install wget text-unidecode pynini==2.1.4\n", diff --git a/tutorials/tts/Inference_DurationPitchControl.ipynb b/tutorials/tts/Inference_DurationPitchControl.ipynb index 4b87cf251ec6..59a01c628449 100644 --- a/tutorials/tts/Inference_DurationPitchControl.ipynb +++ b/tutorials/tts/Inference_DurationPitchControl.ipynb @@ -46,11 +46,7 @@ "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", "4. 
Run this cell to set up dependencies.\n", "\"\"\"\n", -<<<<<<< HEAD - "BRANCH = 'r1.9.0'\n", -======= "BRANCH = 'r1.13.0'\n", ->>>>>>> origin/r1.13.0 "# # If you're using Google Colab and not running locally, uncomment and run this cell.\n", "# !apt-get install sox libsndfile1 ffmpeg\n", "# !pip install wget text-unidecode\n", diff --git a/tutorials/tts/Inference_ModelSelect.ipynb b/tutorials/tts/Inference_ModelSelect.ipynb index e3d213a02a8a..71067530b311 100644 --- a/tutorials/tts/Inference_ModelSelect.ipynb +++ b/tutorials/tts/Inference_ModelSelect.ipynb @@ -46,11 +46,7 @@ "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", "4. Run this cell to set up dependencies.\n", "\"\"\"\n", -<<<<<<< HEAD - "BRANCH = 'r1.9.0'\n", -======= "BRANCH = 'r1.13.0'\n", ->>>>>>> origin/r1.13.0 "# # If you're using Google Colab and not running locally, uncomment and run this cell.\n", "# !apt-get install sox libsndfile1 ffmpeg\n", "# !pip install wget text-unidecode\n", From 738e37d12750c2e513dd4471c5d486215db39b02 Mon Sep 17 00:00:00 2001 From: Vladimir Bataev Date: Thu, 17 Nov 2022 22:12:35 +0400 Subject: [PATCH 189/244] fixed the onnx bug in conformer for non-streaming models. (#5242) (#5446) Signed-off-by: Vahid Signed-off-by: Vahid Signed-off-by: Vladimir Bataev Signed-off-by: Vahid Signed-off-by: Vladimir Bataev Co-authored-by: Vahid Noroozi --- nemo/collections/asr/modules/conformer_encoder.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nemo/collections/asr/modules/conformer_encoder.py b/nemo/collections/asr/modules/conformer_encoder.py index e7d8f9c6f360..158532da2245 100644 --- a/nemo/collections/asr/modules/conformer_encoder.py +++ b/nemo/collections/asr/modules/conformer_encoder.py @@ -99,10 +99,10 @@ def input_example(self, max_batch=1, max_dim=256): if hasattr(self, 'export_cache_support') and self.export_cache_support: cache_last_channel = torch.randn(self.n_layers, max_batch, max_dim, self.d_model).to(dev) cache_last_time = torch.randn(self.n_layers, max_batch, self.d_model, self.conv_context_size[0]).to(dev) + all_input_example = tuple([input_example, input_example_length, cache_last_channel, cache_last_time]) else: - cache_last_channel = cache_last_time = None + all_input_example = tuple([input_example, input_example_length]) - all_input_example = tuple([input_example, input_example_length, cache_last_channel, cache_last_time]) return all_input_example @property From c170e03d5f4b3ddf6c205bbd4b811b641a11d1a4 Mon Sep 17 00:00:00 2001 From: Sandeep Subramanian Date: Thu, 17 Nov 2022 13:52:18 -0800 Subject: [PATCH 190/244] Set sync_batch_comm in other places (#5448) Signed-off-by: MaximumEntropy Signed-off-by: MaximumEntropy --- .../language_modeling/megatron_gpt_prompt_learning_model.py | 2 ++ .../language_modeling/megatron_t5_prompt_learning_model.py | 2 ++ nemo/collections/nlp/modules/common/text_generation_strategy.py | 2 ++ 3 files changed, 6 insertions(+) diff --git a/nemo/collections/nlp/models/language_modeling/megatron_gpt_prompt_learning_model.py b/nemo/collections/nlp/models/language_modeling/megatron_gpt_prompt_learning_model.py index 21ff3e6ad4d9..febec3fe0da5 100644 --- a/nemo/collections/nlp/models/language_modeling/megatron_gpt_prompt_learning_model.py +++ b/nemo/collections/nlp/models/language_modeling/megatron_gpt_prompt_learning_model.py @@ -585,6 +585,7 @@ def fwd_bwd_step(self, batch, batch_idx, forward_only): dtype=self.autocast_dtype, 
grad_scaler=self.trainer.precision_plugin.scaler if self.cfg.precision == 16 else None, sequence_parallel_enabled=self.cfg.get("sequence_parallel", False), + sync_batch_comm=self.frozen_model.cfg.get('sync_batch_comm', False), ) else: losses_reduced_per_micro_batch = forward_backward_no_pipelining( @@ -595,6 +596,7 @@ def fwd_bwd_step(self, batch, batch_idx, forward_only): tensor_shape=tensor_shape, dtype=self.autocast_dtype, grad_scaler=self.trainer.precision_plugin.scaler if self.cfg.precision == 16 else None, + sync_batch_comm=self.frozen_model.cfg.get('sync_batch_comm', False), ) # only the last stages of the pipeline return losses diff --git a/nemo/collections/nlp/models/language_modeling/megatron_t5_prompt_learning_model.py b/nemo/collections/nlp/models/language_modeling/megatron_t5_prompt_learning_model.py index 3c132abe1991..3e668347ce14 100644 --- a/nemo/collections/nlp/models/language_modeling/megatron_t5_prompt_learning_model.py +++ b/nemo/collections/nlp/models/language_modeling/megatron_t5_prompt_learning_model.py @@ -203,6 +203,7 @@ def fwd_bwd_step(self, batch, batch_idx, forward_only): dtype=self.autocast_dtype, grad_scaler=self.trainer.precision_plugin.scaler if self.cfg.precision == 16 else None, sequence_parallel_enabled=False, + sync_batch_comm=self.frozen_model.cfg.get('sync_batch_comm', False), ) else: losses_reduced_per_micro_batch = forward_backward_no_pipelining( @@ -214,6 +215,7 @@ def fwd_bwd_step(self, batch, batch_idx, forward_only): decoder_sequence_length=dec_seq_length, dtype=self.autocast_dtype, grad_scaler=self.trainer.precision_plugin.scaler if self.cfg.precision == 16 else None, + sync_batch_comm=self.frozen_model.cfg.get('sync_batch_comm', False), ) # only the last stages of the pipeline return losses diff --git a/nemo/collections/nlp/modules/common/text_generation_strategy.py b/nemo/collections/nlp/modules/common/text_generation_strategy.py index c6daf110b830..1bb9c3e41014 100644 --- a/nemo/collections/nlp/modules/common/text_generation_strategy.py +++ b/nemo/collections/nlp/modules/common/text_generation_strategy.py @@ -61,6 +61,7 @@ def forward_step(self, batch, tensor_shape): forward_only=True, tensor_shape=tensor_shape, dtype=self.model.autocast_dtype, + sync_batch_comm=self.model.cfg.get('sync_batch_comm', False), ) else: output_tensor = forward_backward_no_pipelining( @@ -70,6 +71,7 @@ def forward_step(self, batch, tensor_shape): forward_only=True, tensor_shape=tensor_shape, dtype=self.model.autocast_dtype, + sync_batch_comm=self.model.cfg.get('sync_batch_comm', False), ) return output_tensor From 542ab146bb4caa0312fab8b2c0f6356dac014932 Mon Sep 17 00:00:00 2001 From: Boris Fomitchev Date: Fri, 18 Nov 2022 10:01:33 -0800 Subject: [PATCH 191/244] Radtts 1.13 (#5451) * [TTS] Fixing RADTTS training - removing view buffer and fixing accuracy issue (#5358) * [TTS] add CI test for RADTTS training recipe. 
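For reviewers, a minimal sketch of the masked instance normalization this patch introduces in nemo/collections/tts/modules/submodules.py. The toy tensors and sizes below are invented for illustration, and the affine weight/bias are omitted (the module defaults to affine=False); this is a sketch of the idea, not the module code itself:

    import torch

    # Padded batch: N=2 samples, C=3 channels, padded to L=5 frames;
    # the first sample is only 3 frames long.
    x = torch.randn(2, 3, 5)
    mask = torch.tensor([[[1, 1, 1, 0, 0]],
                         [[1, 1, 1, 1, 1]]], dtype=x.dtype)  # (N, 1, L)

    # Per-sample, per-channel statistics over valid frames only,
    # mirroring the masked_instance_norm() helper added by this patch.
    lengths = mask.sum((-1,))                         # (N, 1)
    mean = (x * mask).sum((-1,)) / lengths            # (N, C)
    var = (((x - mean[..., None]) * mask) ** 2).sum((-1,)) / lengths
    out = (x - mean[..., None]) / torch.sqrt(var[..., None] + 1e-5)

    # Padded frames no longer leak into the statistics; for the fully
    # valid second sample this matches torch.nn.functional.instance_norm.

This keeps the encoder's normalization statistics independent of how much padding a batch happens to contain, which appears to be one source of the accuracy issue mentioned above.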
Signed-off-by: Boris Fomitchev Signed-off-by: Xuesong Yang <1646669+XuesongYang@users.noreply.github.com> Co-authored-by: Xuesong Yang <1646669+XuesongYang@users.noreply.github.com> Co-authored-by: Oleksii Kuchaiev --- Jenkinsfile | 37 ++++- .../collections/common/callbacks/callbacks.py | 2 +- nemo/collections/tts/models/radtts.py | 19 +-- nemo/collections/tts/modules/common.py | 95 ++++------- nemo/collections/tts/modules/radtts.py | 90 ++++------ nemo/collections/tts/modules/submodules.py | 155 +++++++++--------- nemo/core/classes/exportable.py | 26 +-- nemo/core/optim/radam.py | 4 +- nemo/utils/cast_utils.py | 2 +- nemo/utils/export_utils.py | 20 ++- scripts/export.py | 3 +- tests/collections/tts/test_tts_exportables.py | 36 +++- 12 files changed, 238 insertions(+), 251 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index 20d052127ae1..b8966e8e5bbd 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -4111,7 +4111,9 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' validation_datasets=/home/TestData/an4_dataset/an4_val.json \ sup_data_path=/home/TestData/an4_dataset/beta_priors \ trainer.devices="[0]" \ - +trainer.limit_train_batches=1 +trainer.limit_val_batches=1 trainer.max_epochs=1 \ + +trainer.limit_train_batches=1 \ + +trainer.limit_val_batches=1 \ + trainer.max_epochs=1 \ trainer.strategy=null \ model.train_ds.dataloader_params.batch_size=4 \ model.train_ds.dataloader_params.num_workers=0 \ @@ -4127,6 +4129,31 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' ~model.text_normalizer_call_kwargs' } } + stage('RADTTS') { + steps { + sh 'python examples/tts/radtts.py \ + train_dataset=/home/TestData/an4_dataset/an4_train.json \ + validation_datasets=/home/TestData/an4_dataset/an4_val.json \ + sup_data_path=/home/TestData/an4_dataset/radtts_beta_priors \ + trainer.devices="[0]" \ + +trainer.limit_train_batches=1 \ + +trainer.limit_val_batches=1 \ + trainer.max_epochs=1 \ + trainer.strategy=null \ + model.pitch_mean=212.35873413085938 \ + model.pitch_std=68.52806091308594 \ + model.train_ds.dataloader_params.batch_size=4 \ + model.train_ds.dataloader_params.num_workers=0 \ + model.validation_ds.dataloader_params.batch_size=4 \ + model.validation_ds.dataloader_params.num_workers=0 \ + export_dir=/home/TestData/radtts_test \ + model.optim.lr=0.0001 \ + model.modelConfig.decoder_use_partial_padding=True \ + ~trainer.check_val_every_n_epoch \ + ~model.text_normalizer \ + ~model.text_normalizer_call_kwargs' + } + } stage('Mixer-TTS') { steps { sh 'python examples/tts/mixer_tts.py \ @@ -4134,7 +4161,9 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' validation_datasets=/home/TestData/an4_dataset/an4_val.json \ sup_data_path=/home/TestData/an4_dataset/sup_data \ trainer.devices="[0]" \ - +trainer.limit_train_batches=1 +trainer.limit_val_batches=1 trainer.max_epochs=1 \ + +trainer.limit_train_batches=1 \ + +trainer.limit_val_batches=1 \ + trainer.max_epochs=1 \ trainer.strategy=null \ model.train_ds.dataloader_params.batch_size=4 \ model.train_ds.dataloader_params.num_workers=0 \ @@ -4151,7 +4180,9 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' train_dataset=/home/TestData/an4_dataset/an4_train.json \ validation_datasets=/home/TestData/an4_dataset/an4_val.json \ trainer.devices="[0]" \ - +trainer.limit_train_batches=1 +trainer.limit_val_batches=1 +trainer.max_epochs=1 \ + +trainer.limit_train_batches=1 \ + +trainer.limit_val_batches=1 \ + +trainer.max_epochs=1 \ trainer.strategy=null \ 
model.train_ds.dataloader_params.batch_size=4 \ model.train_ds.dataloader_params.num_workers=0 \ diff --git a/nemo/collections/common/callbacks/callbacks.py b/nemo/collections/common/callbacks/callbacks.py index 489c862b3780..1a6c011c38df 100644 --- a/nemo/collections/common/callbacks/callbacks.py +++ b/nemo/collections/common/callbacks/callbacks.py @@ -13,7 +13,7 @@ # limitations under the License. import time -from pytorch_lightning.callbacks.base import Callback +from pytorch_lightning.callbacks import Callback from pytorch_lightning.utilities import rank_zero_only # from sacrebleu import corpus_bleu diff --git a/nemo/collections/tts/models/radtts.py b/nemo/collections/tts/models/radtts.py index 47251b4a3f61..30b6189c484f 100644 --- a/nemo/collections/tts/models/radtts.py +++ b/nemo/collections/tts/models/radtts.py @@ -27,7 +27,6 @@ from nemo.collections.tts.helpers.helpers import plot_alignment_to_numpy from nemo.collections.tts.losses.radttsloss import AttentionBinarizationLoss, RADTTSLoss from nemo.collections.tts.models.base import SpectrogramGenerator -from nemo.collections.tts.modules.submodules import PartialConv1d from nemo.core.classes import Exportable from nemo.core.classes.common import typecheck from nemo.core.neural_types.elements import Index, MelSpectrogramType, TokenIndex @@ -159,7 +158,7 @@ def training_step(self, batch, batch_idx): loss_outputs['binarization_loss'] = (binarization_loss, 1.0) for k, (v, w) in loss_outputs.items(): - self.log("train/" + k, loss_outputs[k][0]) + self.log("train/" + k, loss_outputs[k][0], on_step=True) return {'loss': loss} @@ -229,7 +228,7 @@ def validation_epoch_end(self, outputs): for k, v in loss_outputs.items(): if k != "binarization_loss": - self.log("val/" + k, loss_outputs[k][0]) + self.log("val/" + k, loss_outputs[k][0], sync_dist=True, on_epoch=True) attn = outputs[0]["attn"] attn_soft = outputs[0]["attn_soft"] @@ -407,17 +406,3 @@ def output_module(self): def forward_for_export(self, text, lens, speaker_id, speaker_id_text, speaker_id_attributes): return self.model.forward_for_export(text, lens, speaker_id, speaker_id_text, speaker_id_attributes) - - def get_export_subnet(self, subnet=None): - return self.model.get_export_subnet(subnet) - - def _prepare_for_export(self, **kwargs): - """ - Override this method to prepare module for export. This is in-place operation. 
- Base version does common necessary module replacements (Apex etc) - """ - PartialConv1d.forward = PartialConv1d.forward_no_cache - super()._prepare_for_export(**kwargs) - - def _export_teardown(self): - PartialConv1d.forward = PartialConv1d.forward_with_cache diff --git a/nemo/collections/tts/modules/common.py b/nemo/collections/tts/modules/common.py index 7eff0c4c3baf..5d93340b4c2d 100644 --- a/nemo/collections/tts/modules/common.py +++ b/nemo/collections/tts/modules/common.py @@ -30,7 +30,7 @@ piecewise_linear_transform, unbounded_piecewise_quadratic_transform, ) -from nemo.collections.tts.modules.submodules import ConvNorm, LinearNorm +from nemo.collections.tts.modules.submodules import ConvNorm, LinearNorm, MaskedInstanceNorm1d @torch.jit.script @@ -45,7 +45,7 @@ def get_mask_from_lengths_and_val(lengths, val): max_len = val.shape[-1] ids = torch.arange(0, max_len, device=lengths.device) mask = ids < lengths.unsqueeze(1) - return mask.float() + return mask @torch.jit.script @@ -124,30 +124,31 @@ def lstm_tensor(self, context: Tensor, lens: Tensor, enforce_sorted: bool = Fals seq = nn.utils.rnn.pack_padded_sequence( context, lens.long().cpu(), batch_first=True, enforce_sorted=enforce_sorted ) - if not torch.jit.is_scripting(): + if not (torch.jit.is_scripting() or torch.jit.is_tracing()): self.bilstm.flatten_parameters() - ret, _ = self.bilstm(seq) + if hasattr(self.bilstm, 'forward'): + ret, _ = self.bilstm.forward(seq) + else: + ret, _ = self.bilstm.forward_1(seq) return nn.utils.rnn.pad_packed_sequence(ret, batch_first=True) @torch.jit.export def lstm_sequence(self, seq: PackedSequence) -> Tuple[Tensor, Tensor]: - if not torch.jit.is_scripting(): + if not (torch.jit.is_scripting() or torch.jit.is_tracing()): self.bilstm.flatten_parameters() - ret, _ = self.bilstm(seq) + if hasattr(self.bilstm, 'forward'): + ret, _ = self.bilstm.forward(seq) + elif hasattr(self.bilstm, 'forward_1'): + ret, _ = self.bilstm.forward_1(seq) return nn.utils.rnn.pad_packed_sequence(ret, batch_first=True) @torch.jit.export def sort_and_lstm_tensor(self, context: Tensor, lens: Tensor) -> Tensor: - lens_sorted, ids_sorted = torch.sort(lens, descending=True) - unsort_ids = torch.zeros_like(ids_sorted) - for i in range(ids_sorted.shape[0]): - unsort_ids[ids_sorted[i]] = i - context = context[ids_sorted] + context, lens_sorted, unsort_ids = sort_tensor(context, lens) seq = nn.utils.rnn.pack_padded_sequence( context, lens_sorted.long().cpu(), batch_first=True, enforce_sorted=True ) - ret, _ = self.bilstm(seq) - return nn.utils.rnn.pad_packed_sequence(ret, batch_first=True)[0][unsort_ids] + return self.lstm_sequence(seq)[0][unsort_ids] class ConvLSTMLinear(BiLSTM): @@ -161,14 +162,14 @@ def __init__( p_dropout=0.1, use_partial_padding=False, norm_fn=None, - lstm_norm_fn="spectral", ): super(ConvLSTMLinear, self).__init__(n_channels, int(n_channels // 2), 1) - self.out_dim = out_dim + self.convolutions = nn.ModuleList() if n_layers > 0: self.dropout = nn.Dropout(p=p_dropout) - self.convolutions = nn.ModuleList() + + use_weight_norm = norm_fn is None for i in range(n_layers): conv_layer = ConvNorm( @@ -179,14 +180,13 @@ def __init__( padding=int((kernel_size - 1) / 2), dilation=1, w_init_gain='relu', - use_weight_norm=False, + use_weight_norm=use_weight_norm, use_partial_padding=use_partial_padding, norm_fn=norm_fn, ) if norm_fn is not None: print("Applying {} norm to {}".format(norm_fn, conv_layer)) else: - conv_layer = torch.nn.utils.weight_norm(conv_layer.conv) print("Applying weight norm to 
{}".format(conv_layer)) self.convolutions.append(conv_layer) @@ -194,57 +194,23 @@ def __init__( if out_dim is not None: self.dense = nn.Linear(n_channels, out_dim) - @torch.jit.export - def conv_to_sequence(self, context: Tensor, lens: Tensor, enforce_sorted: bool = False) -> PackedSequence: - context_embedded = [] - bs: int = context.shape[0] - b_ind: int = 0 - for b_ind in range(bs): # TODO: speed up - curr_context = context[b_ind : b_ind + 1, :, : lens[b_ind]].clone() - for conv in self.convolutions: - curr_context = self.dropout(F.relu(conv(curr_context))) - context_embedded.append(curr_context[0].transpose(0, 1)) - seq = torch.nn.utils.rnn.pack_sequence(context_embedded, enforce_sorted=enforce_sorted) - return seq - - @torch.jit.export - def conv_to_padded_tensor(self, context: Tensor, lens: Tensor) -> Tensor: - context_embedded = [] - bs: int = context.shape[0] - b_ind: int = 0 - for b_ind in range(bs): # TODO: speed up - curr_context = context[b_ind : b_ind + 1, :, : lens[b_ind]].clone() - for conv in self.convolutions: - curr_context = self.dropout(F.relu(conv(curr_context))) - context_embedded.append(curr_context[0].transpose(0, 1)) - ret = torch.nn.utils.rnn.pad_sequence(context_embedded, batch_first=True) - return ret - - @torch.jit.export def masked_conv_to_sequence(self, context: Tensor, lens: Tensor, enforce_sorted: bool = False) -> PackedSequence: mask = get_mask_from_lengths_and_val(lens, context) - mask = mask.unsqueeze(1) + mask = mask.to(dtype=context.dtype).unsqueeze(1) for conv in self.convolutions: context = self.dropout(F.relu(conv(context, mask))) - context = torch.mul(context, mask) + context = context.transpose(1, 2) seq = torch.nn.utils.rnn.pack_padded_sequence( context, lens.long().cpu(), batch_first=True, enforce_sorted=enforce_sorted ) return seq - def forward(self, context: Tensor, lens: Optional[Tensor] = None) -> Tensor: - if lens is None: - for conv in self.convolutions: - context = self.dropout(F.relu(conv(context))) - context = context.transpose(1, 2) - context, _ = self.bilstm(context) - else: - # borisf : does not match ADLR (values, lengths) - # seq = self.masked_conv_to_sequence(context, lens, enforce_sorted=False) - # borisf : does match ADLR - seq = self.conv_to_sequence(context, lens, enforce_sorted=False) - context, _ = self.lstm_sequence(seq) + def forward(self, context: Tensor, lens: Tensor) -> Tensor: + context, lens, unsort_ids = sort_tensor(context, lens) + seq = self.masked_conv_to_sequence(context, lens, enforce_sorted=True) + context, _ = self.lstm_sequence(seq) + context = context[unsort_ids] if self.dense is not None: context = self.dense(context).permute(0, 2, 1) @@ -252,12 +218,8 @@ def forward(self, context: Tensor, lens: Optional[Tensor] = None) -> Tensor: return context -def getRadTTSEncoder( - encoder_n_convolutions=3, - encoder_embedding_dim=512, - encoder_kernel_size=5, - norm_fn=nn.BatchNorm1d, - lstm_norm_fn=None, +def get_radtts_encoder( + encoder_n_convolutions=3, encoder_embedding_dim=512, encoder_kernel_size=5, norm_fn=MaskedInstanceNorm1d, ): return ConvLSTMLinear( in_dim=encoder_embedding_dim, @@ -267,7 +229,6 @@ def getRadTTSEncoder( p_dropout=0.5, use_partial_padding=True, norm_fn=norm_fn, - lstm_norm_fn=lstm_norm_fn, ) @@ -275,7 +236,7 @@ class Invertible1x1ConvLUS(torch.nn.Module): def __init__(self, c): super(Invertible1x1ConvLUS, self).__init__() # Sample a random orthonormal matrix to initialize weights - W = torch.qr(torch.FloatTensor(c, c).normal_())[0] + W, _ = torch.linalg.qr(torch.FloatTensor(c, 
c).normal_()) # Ensure determinant is 1.0 not -1.0 if torch.det(W) < 0: W[:, 0] = -1 * W[:, 0] diff --git a/nemo/collections/tts/modules/radtts.py b/nemo/collections/tts/modules/radtts.py index 83bbcda58230..d41e7dd628e5 100644 --- a/nemo/collections/tts/modules/radtts.py +++ b/nemo/collections/tts/modules/radtts.py @@ -31,10 +31,8 @@ Invertible1x1ConvLUS, LinearNorm, get_mask_from_lengths, - getRadTTSEncoder, - sort_tensor, + get_radtts_encoder, ) -from nemo.collections.tts.modules.submodules import PartialConv1d from nemo.core.classes import Exportable, NeuralModule from nemo.core.neural_types.elements import Index, LengthsType, MelSpectrogramType, TokenDurationType, TokenIndex from nemo.core.neural_types.neural_type import NeuralType @@ -62,11 +60,11 @@ def pad_energy_avg_and_f0(energy_avg, f0, max_out_len): def adjust_f0(f0, f0_mean, f0_std, vmask_bool): if f0_mean > 0.0: - f0_mu, f0_sigma = f0[vmask_bool].mean(), f0[vmask_bool].std() - f0[vmask_bool] = (f0[vmask_bool] - f0_mu) / f0_sigma + f0_sigma, f0_mu = torch.std_mean(f0[vmask_bool]) + f0 = ((f0 - f0_mu) / f0_sigma).to(dtype=f0.dtype) f0_std = f0_std if f0_std > 0 else f0_sigma - f0[vmask_bool] = f0[vmask_bool] * f0_std + f0_mean - return f0 + f0 = (f0 * f0_std + f0_mean).to(dtype=f0.dtype) + return f0.masked_fill(~vmask_bool, 0.0) class FlowStep(nn.Module): @@ -146,8 +144,6 @@ def __init__( n_flows, n_conv_layers_per_step, n_mel_channels, - n_hidden, - mel_encoder_n_hidden, dummy_speaker_embedding, n_early_size, n_early_every, @@ -185,9 +181,7 @@ def __init__( self.speaker_embedding = torch.nn.Embedding(n_speakers, self.n_speaker_dim) self.embedding = torch.nn.Embedding(n_text, n_text_dim) self.flows = torch.nn.ModuleList() - self.encoder = getRadTTSEncoder( - encoder_embedding_dim=n_text_dim, norm_fn=nn.InstanceNorm1d, lstm_norm_fn=text_encoder_lstm_norm - ) + self.encoder = get_radtts_encoder(encoder_embedding_dim=n_text_dim) self.dummy_speaker_embedding = dummy_speaker_embedding self.learn_alignments = learn_alignments self.affine_activation = affine_activation @@ -196,11 +190,11 @@ def __init__( self.use_context_lstm = bool(use_context_lstm) self.context_lstm_norm = context_lstm_norm self.context_lstm_w_f0_and_energy = context_lstm_w_f0_and_energy - # self.length_regulator = LengthRegulator() self.use_first_order_features = bool(use_first_order_features) self.decoder_use_unvoiced_bias = kwargs['decoder_use_unvoiced_bias'] self.ap_pred_log_f0 = ap_pred_log_f0 self.ap_use_unvoiced_bias = kwargs['ap_use_unvoiced_bias'] + if 'atn' in include_modules or 'dec' in include_modules: if self.learn_alignments: self.attention = ConvAttention(n_mel_channels, self.n_speaker_dim, n_text_dim) @@ -218,12 +212,6 @@ def __init__( n_in_context_lstm = n_f0_dims + n_energy_avg_dims + n_text_dim n_in_context_lstm *= n_group_size n_in_context_lstm += self.n_speaker_dim - - n_context_hidden = n_f0_dims + n_energy_avg_dims + n_text_dim - n_context_hidden = n_context_hidden * n_group_size / 2 - n_context_hidden = self.n_speaker_dim + n_context_hidden - n_context_hidden = int(n_context_hidden) - n_flowstep_cond_dims = self.n_speaker_dim + n_text_dim * n_group_size self.context_lstm = BiLSTM( @@ -358,7 +346,7 @@ def preprocess_context(self, context, speaker_vecs, out_lens, f0, energy_avg): context_w_spkvec = torch.cat((context_w_spkvec, energy_avg), 1) unfolded_out_lens = out_lens // self.n_group_size - context_lstm_padded_output, _ = self.context_lstm.lstm_tensor( + context_lstm_padded_output = self.context_lstm.sort_and_lstm_tensor( 
context_w_spkvec.transpose(1, 2), unfolded_out_lens ) context_w_spkvec = context_lstm_padded_output.transpose(1, 2) @@ -466,10 +454,11 @@ def forward( f0_bias = 0 # unvoiced bias forward pass + voiced_mask_bool = voiced_mask.bool() if self.use_unvoiced_bias: f0_bias = self.unvoiced_bias_module(context.permute(0, 2, 1)) f0_bias = -f0_bias[..., 0] - f0_bias = f0_bias * (~voiced_mask.bool()).float() + f0_bias.masked_fill_(voiced_mask_bool, 0.0) # mel decoder forward pass if 'dec' in self.include_modules: @@ -478,7 +467,6 @@ def forward( # sometimes referred to as the "squeeze" operation # invert this by calling self.fold(mel_or_z) mel = self.unfold(mel.unsqueeze(-1)) - z_out = [] # where context is folded # mask f0 in case values are interpolated context_w_spkvec = self.preprocess_context( @@ -542,7 +530,7 @@ def forward( else: f0_target = torch.detach(f0) # fit to log f0 in f0 predictor - f0_target[voiced_mask.bool()] = torch.log(f0_target[voiced_mask.bool()]) + f0_target[voiced_mask_bool] = torch.log(f0_target[voiced_mask_bool]) f0_target = f0_target / 6 # scale to ~ [0, 1] in log space energy_avg = energy_avg * 2 - 1 # scale to ~ [-1, 1] @@ -603,8 +591,6 @@ def infer( voiced_mask=None, ): - # print ("Text, lens: ", text.shape, in_lens.shape) - batch_size = text.shape[0] n_tokens = text.shape[1] spk_vec = self.encode_speaker(speaker_id) @@ -615,7 +601,6 @@ def infer( spk_vec_text = self.encode_speaker(speaker_id_text) spk_vec_attributes = self.encode_speaker(speaker_id_attributes) txt_enc, _ = self.encode_text(text, in_lens) - print("txt_enc: ", txt_enc.shape) if dur is None: # get token durations @@ -626,9 +611,7 @@ def infer( dur = dur[:, 0] dur = dur.clamp(0, token_duration_max) - # get attributes f0, energy, vpred, etc) txt_enc_time_expanded, out_lens = regulate_len(dur, txt_enc.transpose(1, 2), pace) - # print ("txt_enc_time_expanded, out_lens, dur: ", txt_enc_time_expanded.shape, out_lens, dur) n_groups = torch.div(out_lens, self.n_group_size, rounding_mode='floor') max_out_len = torch.max(out_lens) @@ -637,8 +620,10 @@ def infer( if self.use_vpred_module: # get logits voiced_mask = self.v_pred_module.infer(None, txt_enc_time_expanded, spk_vec_attributes, lens=out_lens) - voiced_mask = torch.sigmoid(voiced_mask[:, 0]) > 0.5 - voiced_mask = voiced_mask.float() + voiced_mask_bool = torch.sigmoid(voiced_mask[:, 0]) > 0.5 + voiced_mask = voiced_mask_bool.to(dur.dtype) + else: + voiced_mask_bool = voiced_mask.bool() ap_txt_enc_time_expanded = txt_enc_time_expanded # voice mask augmentation only used for attribute prediction @@ -650,14 +635,13 @@ def infer( if self.use_unvoiced_bias: f0_bias = self.unvoiced_bias_module(txt_enc_time_expanded.permute(0, 2, 1)) f0_bias = -f0_bias[..., 0] - f0_bias = f0_bias * (~voiced_mask.bool()).float() if f0 is None: n_f0_feature_channels = 2 if self.use_first_order_features else 1 z_f0 = torch.normal(txt_enc.new_zeros(batch_size, n_f0_feature_channels, max_out_len)) * sigma_f0 - f0 = self.infer_f0(z_f0, ap_txt_enc_time_expanded, spk_vec_attributes, voiced_mask, out_lens)[:, 0] + f0 = self.infer_f0(z_f0, ap_txt_enc_time_expanded, spk_vec_attributes, voiced_mask_bool, out_lens)[:, 0] - f0 = adjust_f0(f0, f0_mean, f0_std, voiced_mask.to(dtype=bool)) + f0 = adjust_f0(f0, f0_mean, f0_std, voiced_mask_bool) if energy_avg is None: n_energy_feature_channels = 2 if self.use_first_order_features else 1 @@ -669,20 +653,17 @@ def infer( # replication pad, because ungrouping with different group sizes # may lead to mismatched lengths # FIXME: use replication pad - 
print("V mask, energy_avg, f0, f0_bias: ", voiced_mask.shape, energy_avg.shape, f0.shape, f0_bias.shape) (energy_avg, f0) = pad_energy_avg_and_f0(energy_avg, f0, max_out_len) - print("V mask, energy_avg, f0, f0_bias: ", voiced_mask.shape, energy_avg.shape, f0.shape, f0_bias.shape) context_w_spkvec = self.preprocess_context( - txt_enc_time_expanded, spk_vec, out_lens, f0 * voiced_mask + f0_bias, energy_avg + txt_enc_time_expanded, spk_vec, out_lens, (f0 + f0_bias) * voiced_mask, energy_avg ) residual = torch.normal(txt_enc.new_zeros(batch_size, 80 * self.n_group_size, torch.max(n_groups))) * sigma # map from z sample to data num_steps_to_exit = len(self.exit_steps) - mel = residual[:, num_steps_to_exit * self.n_early_size :] - remaining_residual = residual[:, : num_steps_to_exit * self.n_early_size] + remaining_residual, mel = torch.tensor_split(residual, [num_steps_to_exit * self.n_early_size,], dim=1) for i, flow_step in enumerate(reversed(self.flows)): curr_step = self.n_flows - i - 1 @@ -690,22 +671,19 @@ def infer( if num_steps_to_exit > 0 and curr_step == self.exit_steps[num_steps_to_exit - 1]: # concatenate the next chunk of z num_steps_to_exit = num_steps_to_exit - 1 - residual_to_add = remaining_residual[:, num_steps_to_exit * self.n_early_size :] - remaining_residual = remaining_residual[:, : num_steps_to_exit * self.n_early_size] + remaining_residual, residual_to_add = torch.tensor_split( + remaining_residual, [num_steps_to_exit * self.n_early_size,], dim=1 + ) mel = torch.cat((residual_to_add, mel), 1) if self.n_group_size > 1: mel = self.fold(mel) - # print ("mel=", mel.shape, "out_lens=", out_lens, "dur=", dur.shape) - return {'mel': mel, 'out_lens': out_lens, 'dur': dur, 'f0': f0, 'energy_avg': energy_avg} def infer_f0(self, residual, txt_enc_time_expanded, spk_vec, voiced_mask=None, lens=None): f0 = self.f0_pred_module.infer(residual, txt_enc_time_expanded, spk_vec, lens) - if voiced_mask is not None and len(voiced_mask.shape) == 2: - voiced_mask = voiced_mask[:, None] # constants if self.ap_pred_log_f0: if self.use_first_order_features: @@ -720,14 +698,15 @@ def infer_f0(self, residual, txt_enc_time_expanded, spk_vec, voiced_mask=None, l if voiced_mask is None: voiced_mask = f0 > 0.0 else: - voiced_mask = voiced_mask.bool() - # due to grouping, f0 might be 1 frame short - voiced_mask = voiced_mask[:, :, : f0.shape[-1]] + if len(voiced_mask.shape) == 2: + voiced_mask = voiced_mask[:, None] + # due to grouping, f0 might be 1 frame short + voiced_mask = voiced_mask[:, :, : f0.shape[-1]] + if self.ap_pred_log_f0: # if variable is set, decoder sees linear f0 - # mask = f0 > 0.0 if voiced_mask is None else voiced_mask.bool() - f0[voiced_mask] = torch.exp(f0[voiced_mask]).to(f0) - f0[~voiced_mask] = 0.0 + f0 = torch.exp(f0).to(dtype=f0.dtype) + f0.masked_fill_(~voiced_mask, 0.0) return f0 def infer_energy(self, residual, txt_enc_time_expanded, spk_vec, lens): @@ -783,17 +762,8 @@ def output_types(self): # Methods for model exportability def _prepare_for_export(self, **kwargs): - PartialConv1d.forward = PartialConv1d.forward_no_cache self.remove_norms() super()._prepare_for_export(**kwargs) - self.encoder = torch.jit.script(self.encoder) - self.v_pred_module.feat_pred_fn = torch.jit.script(self.v_pred_module.feat_pred_fn) - self.f0_pred_module.feat_pred_fn = torch.jit.script(self.f0_pred_module.feat_pred_fn) - self.energy_pred_module.feat_pred_fn = torch.jit.script(self.energy_pred_module.feat_pred_fn) - self.dur_pred_layer.feat_pred_fn = 
torch.jit.script(self.dur_pred_layer.feat_pred_fn) - - if self.use_context_lstm: - self.context_lstm = torch.jit.script(self.context_lstm) def input_example(self, max_batch=1, max_dim=256): """ @@ -804,7 +774,7 @@ def input_example(self, max_batch=1, max_dim=256): par = next(self.parameters()) sz = (max_batch, max_dim) inp = torch.randint(0, 16, sz, device=par.device, dtype=torch.int64) - lens = torch.randint(0, max_dim, (max_batch,), device=par.device, dtype=torch.int) + lens = torch.randint(16, max_dim, (max_batch,), device=par.device, dtype=torch.int) speaker = torch.randint(0, 1, (max_batch,), device=par.device, dtype=torch.int64) inputs = { 'text': inp, diff --git a/nemo/collections/tts/modules/submodules.py b/nemo/collections/tts/modules/submodules.py index 90dd822e1650..e61b9b224885 100644 --- a/nemo/collections/tts/modules/submodules.py +++ b/nemo/collections/tts/modules/submodules.py @@ -12,13 +12,56 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import List, Optional, Tuple +from typing import Tuple import torch +from torch import Tensor from torch.autograd import Variable from torch.nn import functional as F +def masked_instance_norm( + input: Tensor, mask: Tensor, weight: Tensor, bias: Tensor, momentum: float, eps: float = 1e-5, +) -> Tensor: + r"""Applies Masked Instance Normalization for each channel in each data sample in a batch. + + See :class:`~MaskedInstanceNorm1d` for details. + """ + lengths = mask.sum((-1,)) + mean = (input * mask).sum((-1,)) / lengths # (N, C) + var = (((input - mean[(..., None)]) * mask) ** 2).sum((-1,)) / lengths # (N, C) + out = (input - mean[(..., None)]) / torch.sqrt(var[(..., None)] + eps) # (N, C, ...) + out = out * weight[None, :][(..., None)] + bias[None, :][(..., None)] + + return out + + +class MaskedInstanceNorm1d(torch.nn.InstanceNorm1d): + r"""Applies Instance Normalization over a masked 3D input + (a mini-batch of 1D inputs with additional channel dimension).. + + See documentation of :class:`~torch.nn.InstanceNorm1d` for details. + + Shape: + - Input: :math:`(N, C, L)` + - Mask: :math:`(N, 1, L)` + - Output: :math:`(N, C, L)` (same shape as input) + """ + + def __init__( + self, + num_features: int, + eps: float = 1e-5, + momentum: float = 0.1, + affine: bool = False, + track_running_stats: bool = False, + ) -> None: + super(MaskedInstanceNorm1d, self).__init__(num_features, eps, momentum, affine, track_running_stats) + + def forward(self, input: Tensor, mask: Tensor) -> Tensor: + return masked_instance_norm(input, mask, self.weight, self.bias, self.momentum, self.eps,) + + class PartialConv1d(torch.nn.Conv1d): """ Zero padding creates a unique identifier for where the edge of the data is, such that the model can almost always identify @@ -26,31 +69,22 @@ class PartialConv1d(torch.nn.Conv1d): this affect. 
""" + __constants__ = ['slide_winsize'] + slide_winsize: float + def __init__(self, *args, **kwargs): super(PartialConv1d, self).__init__(*args, **kwargs) weight_maskUpdater = torch.ones(1, 1, self.kernel_size[0]) self.register_buffer("weight_maskUpdater", weight_maskUpdater, persistent=False) - slide_winsize = torch.tensor(self.weight_maskUpdater.shape[1] * self.weight_maskUpdater.shape[2]) - self.register_buffer("slide_winsize", slide_winsize, persistent=False) + self.slide_winsize = self.weight_maskUpdater.shape[1] * self.weight_maskUpdater.shape[2] - if self.bias is not None: - bias_view = self.bias.view(1, self.out_channels, 1) - self.register_buffer('bias_view', bias_view, persistent=False) - # caching part - self.last_size = (-1, -1, -1) - - update_mask = torch.ones(1, 1, 1) - self.register_buffer('update_mask', update_mask, persistent=False) - mask_ratio = torch.ones(1, 1, 1) - self.register_buffer('mask_ratio', mask_ratio, persistent=False) - self.partial: bool = True - - def calculate_mask(self, input: torch.Tensor, mask_in: Optional[torch.Tensor]): + def forward(self, input, mask_in): + if mask_in is None: + mask = torch.ones(1, 1, input.shape[2], dtype=input.dtype, device=input.device) + else: + mask = mask_in + input = torch.mul(input, mask) with torch.no_grad(): - if mask_in is None: - mask = torch.ones(1, 1, input.shape[2], dtype=input.dtype, device=input.device) - else: - mask = mask_in update_mask = F.conv1d( mask, self.weight_maskUpdater, @@ -60,58 +94,22 @@ def calculate_mask(self, input: torch.Tensor, mask_in: Optional[torch.Tensor]): dilation=self.dilation, groups=1, ) - # for mixed precision training, change 1e-8 to 1e-6 - mask_ratio = self.slide_winsize / (update_mask + 1e-6) + update_mask_filled = torch.masked_fill(update_mask, update_mask == 0, self.slide_winsize) + mask_ratio = self.slide_winsize / update_mask_filled update_mask = torch.clamp(update_mask, 0, 1) - mask_ratio = torch.mul(mask_ratio.to(update_mask), update_mask) - return torch.mul(input, mask), mask_ratio, update_mask - - def forward_aux(self, input: torch.Tensor, mask_ratio: torch.Tensor, update_mask: torch.Tensor) -> torch.Tensor: - assert len(input.shape) == 3 + mask_ratio = torch.mul(mask_ratio, update_mask) raw_out = self._conv_forward(input, self.weight, self.bias) if self.bias is not None: - output = torch.mul(raw_out - self.bias_view, mask_ratio) + self.bias_view + bias_view = self.bias.view(1, self.out_channels, 1) + output = torch.mul(raw_out - bias_view, mask_ratio) + bias_view output = torch.mul(output, update_mask) else: output = torch.mul(raw_out, mask_ratio) return output - @torch.jit.ignore - def forward_with_cache(self, input: torch.Tensor, mask_in: Optional[torch.Tensor] = None) -> torch.Tensor: - use_cache = not (torch.jit.is_tracing() or torch.onnx.is_in_onnx_export()) - cache_hit = use_cache and mask_in is None and self.last_size == input.shape - if cache_hit: - mask_ratio = self.mask_ratio - update_mask = self.update_mask - else: - input, mask_ratio, update_mask = self.calculate_mask(input, mask_in) - if use_cache: - # if a mask is input, or tensor shape changed, update mask ratio - self.last_size = tuple(input.shape) - self.update_mask = update_mask - self.mask_ratio = mask_ratio - return self.forward_aux(input, mask_ratio, update_mask) - - def forward_no_cache(self, input: torch.Tensor, mask_in: Optional[torch.Tensor] = None) -> torch.Tensor: - if self.partial: - input, mask_ratio, update_mask = self.calculate_mask(input, mask_in) - return self.forward_aux(input, mask_ratio, 
update_mask) - else: - if mask_in is not None: - input = torch.mul(input, mask_in) - return self._conv_forward(input, self.weight, self.bias) - - def forward(self, input: torch.Tensor, mask_in: Optional[torch.Tensor] = None) -> torch.Tensor: - if self.partial: - return self.forward_with_cache(input, mask_in) - else: - if mask_in is not None: - input = torch.mul(input, mask_in) - return self._conv_forward(input, self.weight, self.bias) - class LinearNorm(torch.nn.Module): def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'): @@ -125,6 +123,9 @@ def forward(self, x): class ConvNorm(torch.nn.Module): + __constants__ = ['use_partial_padding'] + use_partial_padding: bool + def __init__( self, in_channels, @@ -135,16 +136,19 @@ def __init__( dilation=1, bias=True, w_init_gain='linear', - use_partial_padding: bool = False, - use_weight_norm: bool = False, + use_partial_padding=False, + use_weight_norm=False, norm_fn=None, ): super(ConvNorm, self).__init__() if padding is None: assert kernel_size % 2 == 1 padding = int(dilation * (kernel_size - 1) / 2) - self.use_partial_padding: bool = use_partial_padding - conv = PartialConv1d( + self.use_partial_padding = use_partial_padding + conv_fn = torch.nn.Conv1d + if use_partial_padding: + conv_fn = PartialConv1d + self.conv = conv_fn( in_channels, out_channels, kernel_size=kernel_size, @@ -153,20 +157,25 @@ def __init__( dilation=dilation, bias=bias, ) - conv.partial = use_partial_padding - torch.nn.init.xavier_uniform_(conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain)) + torch.nn.init.xavier_uniform_(self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain)) if use_weight_norm: - conv = torch.nn.utils.weight_norm(conv) + self.conv = torch.nn.utils.weight_norm(self.conv) if norm_fn is not None: self.norm = norm_fn(out_channels, affine=True) else: self.norm = None - self.conv = conv - def forward(self, input: torch.Tensor, mask_in: Optional[torch.Tensor] = None) -> torch.Tensor: - ret = self.conv(input, mask_in) - if self.norm is not None: - ret = self.norm(ret) + def forward(self, signal, mask=None): + if self.use_partial_padding: + ret = self.conv(signal, mask) + if self.norm is not None: + ret = self.norm(ret, mask) + else: + if mask is not None: + signal = signal * mask + ret = self.conv(signal) + if self.norm is not None: + ret = self.norm(ret) return ret diff --git a/nemo/core/classes/exportable.py b/nemo/core/classes/exportable.py index 5a9ab55a4ee7..0ac2ea663b57 100644 --- a/nemo/core/classes/exportable.py +++ b/nemo/core/classes/exportable.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-import os from abc import ABC from typing import List, Union @@ -128,7 +127,7 @@ def _export( # Set module mode with torch.onnx.select_model_mode_for_export( self, training - ), torch.inference_mode(), torch.jit.optimized_execution(True): + ), torch.inference_mode(), torch.no_grad(), torch.jit.optimized_execution(True): if input_example is None: input_example = self.input_module.input_example() @@ -147,12 +146,14 @@ def _export( output_names = self.output_names output_example = tuple(self.forward(*input_list, **input_dict)) + if check_trace: + if isinstance(check_trace, bool): + check_trace_input = [input_example] + else: + check_trace_input = check_trace + if format == ExportFormat.TORCHSCRIPT: - if check_trace: - if isinstance(check_trace, bool): - check_trace_input = {"forward": tuple(input_list) + tuple(input_dict.values())} - else: - check_trace_input = check_trace + jitted_model = torch.jit.trace_module( self, {"forward": tuple(input_list) + tuple(input_dict.values())}, @@ -165,14 +166,9 @@ def _export( if verbose: logging.info(f"JIT code:\n{jitted_model.code}") jitted_model.save(output) - assert os.path.exists(output) + jitted_model = torch.jit.load(output) if check_trace: - if isinstance(check_trace, bool): - check_trace_input = [input_example] - else: - check_trace_input = check_trace - verify_torchscript(jitted_model, output, check_trace_input, input_names, check_tolerance) elif format == ExportFormat.ONNX: @@ -196,10 +192,6 @@ def _export( ) if check_trace: - if isinstance(check_trace, bool): - check_trace_input = [input_example] - else: - check_trace_input = check_trace verify_runtime(self, output, check_trace_input, input_names) else: raise ValueError(f'Encountered unknown export format {format}.') diff --git a/nemo/core/optim/radam.py b/nemo/core/optim/radam.py index 62a5ecff87be..69cfab4bf858 100644 --- a/nemo/core/optim/radam.py +++ b/nemo/core/optim/radam.py @@ -81,8 +81,8 @@ def step(self, closure=None): exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] beta1, beta2 = group['betas'] - exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) - exp_avg.mul_(beta1).add_(1 - beta1, grad) + exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1.0 - beta2)) + exp_avg.mul_(beta1).add_(grad, alpha=(1.0 - beta1)) state['step'] += 1 buffered = self.buffer[int(state['step'] % 10)] diff --git a/nemo/utils/cast_utils.py b/nemo/utils/cast_utils.py index 9eb064936ea5..f973a4719e24 100644 --- a/nemo/utils/cast_utils.py +++ b/nemo/utils/cast_utils.py @@ -70,6 +70,6 @@ def __init__(self, mod): self.mod = mod def forward(self, x): - with avoid_float16_autocast_context(): + with torch.cuda.amp.autocast(enabled=False): ret = self.mod.forward(x.to(torch.float32)).to(x.dtype) return ret diff --git a/nemo/utils/export_utils.py b/nemo/utils/export_utils.py index a5c4e5b3d24f..fbe21b9cf8f8 100644 --- a/nemo/utils/export_utils.py +++ b/nemo/utils/export_utils.py @@ -15,7 +15,7 @@ import os from contextlib import nullcontext from enum import Enum -from typing import Callable, Dict, Optional, Type +from typing import Callable, Dict, List, Optional, Type import onnx import torch @@ -160,7 +160,7 @@ def verify_torchscript(model, output, input_examples, input_names, check_toleran for input_example in input_examples: input_list, input_dict = parse_input_example(input_example) output_example = model.forward(*input_list, **input_dict) - # ts_input = to_onnxrt_input(ort_input_names, input_names, input_dict, input_list) + all_good = all_good and run_ts_and_compare(ts_model, input_list, input_dict, 
output_example, check_tolerance) status = "SUCCESS" if all_good else "FAIL" logging.info(f"Torchscript generated at {output} verified with torchscript forward : " + status) @@ -203,7 +203,7 @@ def run_ts_and_compare(ts_model, ts_input_list, ts_input_dict, output_example, c if torch.is_tensor(expected): tout = out.to('cpu') - logging.info(f"Checking output {i}, shape: {expected.shape}:\n{expected}\n{tout}") + logging.debug(f"Checking output {i}, shape: {expected.shape}:\n{expected}\n{tout}") if not torch.allclose(tout, expected.cpu(), rtol=check_tolerance, atol=check_tolerance): all_good = False logging.info(f"onnxruntime results mismatch! PyTorch(expected):\n{expected}\nTorchScript:\n{tout}") @@ -219,7 +219,7 @@ def run_ort_and_compare(sess, ort_input, output_example, check_tolerance=0.01): if torch.is_tensor(expected): tout = torch.from_numpy(out) - logging.info(f"Checking output {i}, shape: {expected.shape}:\n{expected}\n{tout}") + logging.debug(f"Checking output {i}, shape: {expected.shape}:\n{expected}\n{tout}") if not torch.allclose(tout, expected.cpu(), rtol=check_tolerance, atol=100 * check_tolerance): all_good = False logging.info(f"onnxruntime results mismatch! PyTorch(expected):\n{expected}\nONNXruntime:\n{tout}") @@ -418,6 +418,11 @@ def replace_modules( return model +def script_module(m: nn.Module): + m1 = torch.jit.script(m) + return m1 + + default_replacements = { "BatchNorm1d": wrap_module(nn.BatchNorm1d, CastToFloat), "BatchNorm2d": wrap_module(nn.BatchNorm2d, CastToFloat), @@ -425,6 +430,11 @@ def replace_modules( "MatchedScaleMaskSoftmax": wrap_module(nn.Softmax, ExportableMatchedScaleMaskSoftmax), } +script_replacements = { + "BiLSTM": script_module, + "ConvLSTMLinear": script_module, +} + def replace_for_export(model: nn.Module) -> nn.Module: """ @@ -438,3 +448,5 @@ def replace_for_export(model: nn.Module) -> nn.Module: """ replace_modules(model, default_Apex_replacements) replace_modules(model, default_replacements) + # This one has to be the last + replace_modules(model, script_replacements) diff --git a/scripts/export.py b/scripts/export.py index b3d6317e936c..2e100e446e72 100644 --- a/scripts/export.py +++ b/scripts/export.py @@ -143,10 +143,11 @@ def nemo_export(argv): if check_trace and len(in_args) > 0: input_example = model.input_module.input_example(**in_args) check_trace = [input_example] - for key, arg in in_args: + for key, arg in in_args.items(): in_args[key] = (arg + 1) // 2 input_example2 = model.input_module.input_example(**in_args) check_trace.append(input_example2) + logging.info(f"Using additional check args: {in_args}") _, descriptions = model.export( out, diff --git a/tests/collections/tts/test_tts_exportables.py b/tests/collections/tts/test_tts_exportables.py index 3c3f13a028a6..e3e496373271 100644 --- a/tests/collections/tts/test_tts_exportables.py +++ b/tests/collections/tts/test_tts_exportables.py @@ -15,8 +15,10 @@ import tempfile import pytest +from omegaconf import OmegaConf -from nemo.collections.tts.models import FastPitchModel, HifiGanModel +from nemo.collections.tts.models import FastPitchModel, HifiGanModel, RadTTSModel +from nemo.utils.app_state import AppState @pytest.fixture() @@ -31,6 +33,27 @@ def hifigan_model(): return model +@pytest.fixture() +def radtts_model(): + this_test_dir = os.path.dirname(os.path.abspath(__file__)) + + cfg = OmegaConf.load(os.path.join(this_test_dir, '../../../examples/tts/conf/rad-tts_feature_pred.yaml')) + cfg.model.init_from_ptl_ckpt = None + cfg.model.train_ds.dataset.manifest_filepath = "dummy.json" 
+ cfg.model.train_ds.dataset.sup_data_path = "dummy.json" + cfg.model.validation_ds.dataset.manifest_filepath = "dummy.json" + cfg.model.validation_ds.dataset.sup_data_path = "dummy.json" + cfg.pitch_mean = 212.35 + cfg.pitch_std = 68.52 + + app_state = AppState() + app_state.is_model_being_restored = True + model = RadTTSModel(cfg=cfg.model) + app_state.is_model_being_restored = False + model.eval() + return model + + class TestExportable: @pytest.mark.run_only_on('GPU') @pytest.mark.unit @@ -50,7 +73,10 @@ def test_HifiGanModel_export_to_onnx(self, hifigan_model): filename = os.path.join(tmpdir, 'hfg.pt') model.export(output=filename, verbose=True, check_trace=True) - -if __name__ == "__main__": - t = TestExportable() - t.test_FastPitchModel_export_to_onnx(fastpitch_model()) + @pytest.mark.run_only_on('GPU') + @pytest.mark.unit + def test_RadTTSModel_export_to_torchscript(self, radtts_model): + model = radtts_model.cuda() + with tempfile.TemporaryDirectory() as tmpdir: + filename = os.path.join(tmpdir, 'rad.ts') + model.export(output=filename, verbose=True, check_trace=True) From 4b48ea89042778fd29f084c1ef243378f1a5abb3 Mon Sep 17 00:00:00 2001 From: Boris Fomitchev Date: Mon, 21 Nov 2022 13:11:24 -0800 Subject: [PATCH 192/244] Radtts 1.13 plus (#5457) * [TTS] Fixing RADTTS training - removing view buffer and fixing accuracy issue (#5358) * Fixing RADTTS training - removing view buffer and fixing accuracy issue * Fixes for Torchscript/Triton * Added autocast to radtts UT * using cuda() for training example Signed-off-by: Boris Fomitchev Signed-off-by: Xuesong Yang <1646669+XuesongYang@users.noreply.github.com> Co-authored-by: Xuesong Yang <1646669+XuesongYang@users.noreply.github.com> Co-authored-by: Oleksii Kuchaiev --- examples/tts/radtts.py | 4 +- nemo/collections/tts/modules/common.py | 50 ++++++------------- nemo/collections/tts/modules/radtts.py | 8 ++- nemo/utils/export_utils.py | 36 ++++++++----- tests/collections/tts/test_tts_exportables.py | 4 +- 5 files changed, 47 insertions(+), 55 deletions(-) diff --git a/examples/tts/radtts.py b/examples/tts/radtts.py index 7260e8d9907f..7dbdaedced03 100644 --- a/examples/tts/radtts.py +++ b/examples/tts/radtts.py @@ -61,14 +61,14 @@ def prepare_model_weights(model, unfreeze_modules): def main(cfg): trainer = pl.Trainer(**cfg.trainer) exp_manager(trainer, cfg.get('exp_manager', None)) - model = RadTTSModel(cfg=cfg.model, trainer=trainer) + model = RadTTSModel(cfg=cfg.model, trainer=trainer).cuda() if cfg.model.load_from_checkpoint: model.maybe_init_from_pretrained_checkpoint(cfg=cfg.model) prepare_model_weights(model, cfg.model.trainerConfig.unfreeze_modules) lr_logger = pl.callbacks.LearningRateMonitor() epoch_time_logger = LogEpochTimeCallback() trainer.callbacks.extend([lr_logger, epoch_time_logger]) - trainer.fit(model) + trainer.fit(model.cuda()) if __name__ == '__main__': diff --git a/nemo/collections/tts/modules/common.py b/nemo/collections/tts/modules/common.py index 5d93340b4c2d..906de05cd8c8 100644 --- a/nemo/collections/tts/modules/common.py +++ b/nemo/collections/tts/modules/common.py @@ -119,39 +119,29 @@ def __init__(self, input_size, hidden_size, num_layers=1, lstm_norm_fn="spectral lstm_norm_fn_pntr(self.bilstm, 'weight_hh_l0_reverse') self.bilstm.flatten_parameters() - @torch.jit.export def lstm_tensor(self, context: Tensor, lens: Tensor, enforce_sorted: bool = False) -> Tuple[Tensor, Tensor]: seq = nn.utils.rnn.pack_padded_sequence( context, lens.long().cpu(), batch_first=True, enforce_sorted=enforce_sorted ) - if not 
(torch.jit.is_scripting() or torch.jit.is_tracing()): - self.bilstm.flatten_parameters() - if hasattr(self.bilstm, 'forward'): - ret, _ = self.bilstm.forward(seq) - else: - ret, _ = self.bilstm.forward_1(seq) - return nn.utils.rnn.pad_packed_sequence(ret, batch_first=True) + return self.lstm_sequence(seq) - @torch.jit.export def lstm_sequence(self, seq: PackedSequence) -> Tuple[Tensor, Tensor]: if not (torch.jit.is_scripting() or torch.jit.is_tracing()): self.bilstm.flatten_parameters() - if hasattr(self.bilstm, 'forward'): - ret, _ = self.bilstm.forward(seq) - elif hasattr(self.bilstm, 'forward_1'): - ret, _ = self.bilstm.forward_1(seq) + ret, _ = self.bilstm(seq) return nn.utils.rnn.pad_packed_sequence(ret, batch_first=True) - @torch.jit.export - def sort_and_lstm_tensor(self, context: Tensor, lens: Tensor) -> Tensor: + def forward(self, context: Tensor, lens: Tensor) -> Tensor: context, lens_sorted, unsort_ids = sort_tensor(context, lens) - seq = nn.utils.rnn.pack_padded_sequence( - context, lens_sorted.long().cpu(), batch_first=True, enforce_sorted=True - ) - return self.lstm_sequence(seq)[0][unsort_ids] + dtype = context.dtype + # this is only needed for Torchscript to run in Triton + # (https://github.com/pytorch/pytorch/issues/89241) + with torch.cuda.amp.autocast(enabled=False): + ret = self.lstm_tensor(context.to(dtype=torch.float32), lens_sorted, enforce_sorted=True) + return ret[0].to(dtype=dtype)[unsort_ids] -class ConvLSTMLinear(BiLSTM): +class ConvLSTMLinear(nn.Module): def __init__( self, in_dim=None, @@ -163,7 +153,8 @@ def __init__( use_partial_padding=False, norm_fn=None, ): - super(ConvLSTMLinear, self).__init__(n_channels, int(n_channels // 2), 1) + super(ConvLSTMLinear, self).__init__() + self.bilstm = BiLSTM(n_channels, int(n_channels // 2), 1) self.convolutions = nn.ModuleList() if n_layers > 0: @@ -194,27 +185,16 @@ def __init__( if out_dim is not None: self.dense = nn.Linear(n_channels, out_dim) - def masked_conv_to_sequence(self, context: Tensor, lens: Tensor, enforce_sorted: bool = False) -> PackedSequence: + def forward(self, context: Tensor, lens: Tensor) -> Tensor: mask = get_mask_from_lengths_and_val(lens, context) mask = mask.to(dtype=context.dtype).unsqueeze(1) for conv in self.convolutions: context = self.dropout(F.relu(conv(context, mask))) - context = context.transpose(1, 2) - seq = torch.nn.utils.rnn.pack_padded_sequence( - context, lens.long().cpu(), batch_first=True, enforce_sorted=enforce_sorted - ) - return seq - - def forward(self, context: Tensor, lens: Tensor) -> Tensor: - context, lens, unsort_ids = sort_tensor(context, lens) - seq = self.masked_conv_to_sequence(context, lens, enforce_sorted=True) - context, _ = self.lstm_sequence(seq) - context = context[unsort_ids] - + # Apply Bidirectional LSTM + context = self.bilstm(context, lens) if self.dense is not None: context = self.dense(context).permute(0, 2, 1) - return context diff --git a/nemo/collections/tts/modules/radtts.py b/nemo/collections/tts/modules/radtts.py index d41e7dd628e5..aca6a5c44727 100644 --- a/nemo/collections/tts/modules/radtts.py +++ b/nemo/collections/tts/modules/radtts.py @@ -346,9 +346,7 @@ def preprocess_context(self, context, speaker_vecs, out_lens, f0, energy_avg): context_w_spkvec = torch.cat((context_w_spkvec, energy_avg), 1) unfolded_out_lens = out_lens // self.n_group_size - context_lstm_padded_output = self.context_lstm.sort_and_lstm_tensor( - context_w_spkvec.transpose(1, 2), unfolded_out_lens - ) + context_lstm_padded_output = 
self.context_lstm(context_w_spkvec.transpose(1, 2), unfolded_out_lens)
         context_w_spkvec = context_lstm_padded_output.transpose(1, 2)
 
         if not self.context_lstm_w_f0_and_energy:
@@ -773,8 +771,8 @@ def input_example(self, max_batch=1, max_dim=256):
         """
         par = next(self.parameters())
         sz = (max_batch, max_dim)
-        inp = torch.randint(0, 16, sz, device=par.device, dtype=torch.int64)
-        lens = torch.randint(16, max_dim, (max_batch,), device=par.device, dtype=torch.int)
+        inp = torch.randint(16, 32, sz, device=par.device, dtype=torch.int64)
+        lens = torch.randint(max_dim // 4, max_dim // 2, (max_batch,), device=par.device, dtype=torch.int)
         speaker = torch.randint(0, 1, (max_batch,), device=par.device, dtype=torch.int64)
         inputs = {
             'text': inp,
diff --git a/nemo/utils/export_utils.py b/nemo/utils/export_utils.py
index fbe21b9cf8f8..197d3b478167 100644
--- a/nemo/utils/export_utils.py
+++ b/nemo/utils/export_utils.py
@@ -15,7 +15,7 @@
 import os
 from contextlib import nullcontext
 from enum import Enum
-from typing import Callable, Dict, List, Optional, Type
+from typing import Callable, Dict, Optional, Type
 
 import onnx
 import torch
@@ -154,14 +154,16 @@ def to_onnxrt_input(ort_input_names, input_names, input_dict, input_list):
 
 
 def verify_torchscript(model, output, input_examples, input_names, check_tolerance=0.01):
-    ts_model = torch.jit.load(output)
-
     all_good = True
     for input_example in input_examples:
         input_list, input_dict = parse_input_example(input_example)
         output_example = model.forward(*input_list, **input_dict)
-
-        all_good = all_good and run_ts_and_compare(ts_model, input_list, input_dict, output_example, check_tolerance)
+        # We disable autocast here to make sure exported TS will run under Triton or other C++ env
+        with torch.cuda.amp.autocast(enabled=False):
+            ts_model = torch.jit.load(output)
+            all_good = all_good and run_ts_and_compare(
+                ts_model, input_list, input_dict, output_example, check_tolerance
+            )
     status = "SUCCESS" if all_good else "FAIL"
     logging.info(f"Torchscript generated at {output} verified with torchscript forward : " + status)
     return all_good
@@ -204,9 +206,15 @@ def run_ts_and_compare(ts_model, ts_input_list, ts_input_dict, output_example, c
         if torch.is_tensor(expected):
             tout = out.to('cpu')
             logging.debug(f"Checking output {i}, shape: {expected.shape}:\n{expected}\n{tout}")
-            if not torch.allclose(tout, expected.cpu(), rtol=check_tolerance, atol=check_tolerance):
+            this_good = True
+            try:
+                if not torch.allclose(tout, expected.cpu(), rtol=check_tolerance, atol=check_tolerance):
+                    this_good = False
+            except Exception: # there may be a size mismatch and it may be OK
+                this_good = False
+            if not this_good:
+                logging.info(f"Results mismatch! PyTorch(expected):\n{expected}\nTorchScript:\n{tout}")
                 all_good = False
-                logging.info(f"onnxruntime results mismatch! PyTorch(expected):\n{expected}\nTorchScript:\n{tout}")
 
     return all_good
 
@@ -220,9 +228,15 @@ def run_ort_and_compare(sess, ort_input, output_example, check_tolerance=0.01):
         if torch.is_tensor(expected):
             tout = torch.from_numpy(out)
             logging.debug(f"Checking output {i}, shape: {expected.shape}:\n{expected}\n{tout}")
-            if not torch.allclose(tout, expected.cpu(), rtol=check_tolerance, atol=100 * check_tolerance):
-                all_good = False
+            this_good = True
+            try:
+                if not torch.allclose(tout, expected.cpu(), rtol=check_tolerance, atol=100 * check_tolerance):
+                    this_good = False
+            except Exception: # there may be a size mismatch and it may be OK
+                this_good = False
+            if not this_good:
                logging.info(f"onnxruntime results mismatch! 
PyTorch(expected):\n{expected}\nONNXruntime:\n{tout}") + all_good = False return all_good @@ -419,8 +433,7 @@ def replace_modules( def script_module(m: nn.Module): - m1 = torch.jit.script(m) - return m1 + return torch.jit.script(m) default_replacements = { @@ -432,7 +445,6 @@ def script_module(m: nn.Module): script_replacements = { "BiLSTM": script_module, - "ConvLSTMLinear": script_module, } diff --git a/tests/collections/tts/test_tts_exportables.py b/tests/collections/tts/test_tts_exportables.py index e3e496373271..bf2c0842eb91 100644 --- a/tests/collections/tts/test_tts_exportables.py +++ b/tests/collections/tts/test_tts_exportables.py @@ -15,6 +15,7 @@ import tempfile import pytest +import torch from omegaconf import OmegaConf from nemo.collections.tts.models import FastPitchModel, HifiGanModel, RadTTSModel @@ -79,4 +80,5 @@ def test_RadTTSModel_export_to_torchscript(self, radtts_model): model = radtts_model.cuda() with tempfile.TemporaryDirectory() as tmpdir: filename = os.path.join(tmpdir, 'rad.ts') - model.export(output=filename, verbose=True, check_trace=True) + with torch.cuda.amp.autocast(enabled=True): + model.export(output=filename, verbose=True, check_trace=True) From 8552c95355660caf030fedbb5ee50cf1726816fe Mon Sep 17 00:00:00 2001 From: Sandeep Subramanian Date: Mon, 21 Nov 2022 14:36:32 -0800 Subject: [PATCH 193/244] Add num layers check (#5470) Signed-off-by: MaximumEntropy Signed-off-by: MaximumEntropy --- nemo/collections/nlp/modules/common/megatron/transformer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nemo/collections/nlp/modules/common/megatron/transformer.py b/nemo/collections/nlp/modules/common/megatron/transformer.py index dd4f920f8194..12e18c853bce 100644 --- a/nemo/collections/nlp/modules/common/megatron/transformer.py +++ b/nemo/collections/nlp/modules/common/megatron/transformer.py @@ -2372,7 +2372,7 @@ def forward( fp8_context = nullcontext() with fp8_context: - if self.activations_checkpoint_granularity == 'full': + if self.activations_checkpoint_granularity == 'full' and self.activations_checkpoint_num_layers > 0: hidden_states = self._checkpointed_forward( hidden_states, attention_mask, From 4a523ad4a50ad6b95f0d52d3d9d95fa2f5374614 Mon Sep 17 00:00:00 2001 From: Sandeep Subramanian Date: Tue, 22 Nov 2022 10:08:01 -0800 Subject: [PATCH 194/244] Change to kwargs (#5475) Signed-off-by: MaximumEntropy Signed-off-by: MaximumEntropy --- .../nlp/modules/common/megatron/transformer.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/nemo/collections/nlp/modules/common/megatron/transformer.py b/nemo/collections/nlp/modules/common/megatron/transformer.py index 12e18c853bce..6a0f0e5be6f4 100644 --- a/nemo/collections/nlp/modules/common/megatron/transformer.py +++ b/nemo/collections/nlp/modules/common/megatron/transformer.py @@ -2185,13 +2185,13 @@ def custom_forward(*inputs): for index in range(start, end): layer = self._get_layer(index) hidden_states = layer( - hidden_states, - attention_mask, - encoder_output, - enc_dec_attn_mask, - rotary_pos_emb, - self_attention_relative_position_bias, - cross_attention_relative_position_bias, + hidden_states=hidden_states, + attention_mask=attention_mask, + encoder_output=encoder_output, + enc_dec_attn_mask=enc_dec_attn_mask, + rotary_pos_emb=rotary_pos_emb, + self_attention_relative_position_bias=self_attention_relative_position_bias, + cross_attention_relative_position_bias=cross_attention_relative_position_bias, ) if isinstance(hidden_states, tuple): pass From 
959bddfa506fc02d035ca6232da60b1ca00ab2d7 Mon Sep 17 00:00:00 2001 From: Sandeep Subramanian Date: Tue, 22 Nov 2022 11:51:59 -0800 Subject: [PATCH 195/244] Support for finetuning and finetuning inference with .ckpt files & batch size refactoring (#5339) (#5478) * Initial refactor Signed-off-by: MaximumEntropy * Resolve config before passing to load_from_checkpoint Signed-off-by: MaximumEntropy * Fixes for model parallel and nemo restore Signed-off-by: MaximumEntropy * Fixes for eval Signed-off-by: MaximumEntropy * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Revert config changes Signed-off-by: MaximumEntropy * Refactor Signed-off-by: MaximumEntropy * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix typo Signed-off-by: MaximumEntropy * Remove comments Signed-off-by: MaximumEntropy * Minor Signed-off-by: MaximumEntropy * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix validation reconfiguration Signed-off-by: MaximumEntropy * Remove old comment Signed-off-by: MaximumEntropy * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes for test_ds Signed-off-by: MaximumEntropy * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: MaximumEntropy Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Signed-off-by: MaximumEntropy Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../conf/megatron_t0_config.yaml | 10 +- .../megatron_t5_config_finetune_eval.yaml | 6 +- ...megatron_t5_config_finetune_glue_eval.yaml | 9 +- ...megatron_t5_config_finetune_glue_mnli.yaml | 6 +- ...megatron_t5_config_finetune_glue_xnli.yaml | 8 +- .../conf/megatron_t5_finetune.yaml | 6 +- .../megatron_t5_seq2seq_eval.py | 119 ++++++------ .../megatron_t5_seq2seq_finetune.py | 172 +++++++++++++----- .../megatron_finetune_model.py | 141 ++++++-------- 9 files changed, 283 insertions(+), 194 deletions(-) diff --git a/examples/nlp/language_modeling/conf/megatron_t0_config.yaml b/examples/nlp/language_modeling/conf/megatron_t0_config.yaml index 3850ce505819..f839754fcdc1 100644 --- a/examples/nlp/language_modeling/conf/megatron_t0_config.yaml +++ b/examples/nlp/language_modeling/conf/megatron_t0_config.yaml @@ -36,7 +36,11 @@ exp_manager: save_best_model: True model: - restore_from_path: ??? # Path to a trained T5 or LM-adapted T5 .nemo file + restore_from_path: null # Path to a trained T5 .nemo file + pretrained_checkpoint: + checkpoint_dir: null # Path to a folder that contains a .ckpt file + checkpoint_name: null # Name of the .ckpt file within the checkpoint_dir. + hparams_file: null # Path to a .yaml file that contains the hyperparameters of the checkpoint. tensor_model_parallel_size: 1 pipeline_model_parallel_size: 1 pipeline_model_parallel_split_rank: 0 @@ -78,6 +82,10 @@ model: name: "exact_string_match" # Name of the evaluation metric to use. average: null # Average the metric over the dataset. Options: ['macro', 'micro']. Works only for 'F1', 'accuracy' etc. Refer to torchmetrics for metrics where this is supported. 
num_classes: null + replace_bos_with_pad: ${data.train_ds.replace_bos_with_pad} + add_bos_to_input: ${data.train_ds.add_bos_to_input} + add_eos_to_input: ${data.train_ds.add_eos_to_input} + seed: 1234 optim: name: fused_adam diff --git a/examples/nlp/language_modeling/conf/megatron_t5_config_finetune_eval.yaml b/examples/nlp/language_modeling/conf/megatron_t5_config_finetune_eval.yaml index 8be471a78dde..bc1a7420df48 100644 --- a/examples/nlp/language_modeling/conf/megatron_t5_config_finetune_eval.yaml +++ b/examples/nlp/language_modeling/conf/megatron_t5_config_finetune_eval.yaml @@ -17,7 +17,11 @@ exp_manager: create_checkpoint_callback: False model: - restore_from_path: ??? # Path to a finetuned T5 .nemo file + restore_from_path: null # Path to a trained T5 .nemo file + pretrained_checkpoint: + checkpoint_dir: null # Path to a folder that contains a .ckpt file + checkpoint_name: null # Name of the .ckpt file within the checkpoint_dir. + hparams_file: null # Path to a .yaml file that contains the hyperparameters of the checkpoint. gradient_as_bucket_view: True # Allocate gradients in a contiguous bucket to save memory (less fragmentation and buffer memory) megatron_amp_O2: False # Enable O2 optimization for megatron amp diff --git a/examples/nlp/language_modeling/conf/megatron_t5_config_finetune_glue_eval.yaml b/examples/nlp/language_modeling/conf/megatron_t5_config_finetune_glue_eval.yaml index 87ce5ac03eb5..024ad5f66ae9 100644 --- a/examples/nlp/language_modeling/conf/megatron_t5_config_finetune_glue_eval.yaml +++ b/examples/nlp/language_modeling/conf/megatron_t5_config_finetune_glue_eval.yaml @@ -17,9 +17,16 @@ exp_manager: create_checkpoint_callback: False model: - restore_from_path: ??? # Path to a finetuned T5 .nemo file + restore_from_path: null # Path to a trained T5 .nemo file + pretrained_checkpoint: + checkpoint_dir: null # Path to a folder that contains a .ckpt file + checkpoint_name: null # Name of the .ckpt file within the checkpoint_dir. + hparams_file: null # Path to a .yaml file that contains the hyperparameters of the checkpoint. gradient_as_bucket_view: True # Allocate gradients in a contiguous bucket to save memory (less fragmentation and buffer memory) megatron_amp_O2: False # Enable O2 optimization for megatron amp + tensor_model_parallel_size: 1 + pipeline_model_parallel_size: 1 + pipeline_model_parallel_split_rank: 0 data: validation_ds: diff --git a/examples/nlp/language_modeling/conf/megatron_t5_config_finetune_glue_mnli.yaml b/examples/nlp/language_modeling/conf/megatron_t5_config_finetune_glue_mnli.yaml index ac68b57e0216..ff61c5fde20c 100644 --- a/examples/nlp/language_modeling/conf/megatron_t5_config_finetune_glue_mnli.yaml +++ b/examples/nlp/language_modeling/conf/megatron_t5_config_finetune_glue_mnli.yaml @@ -37,7 +37,11 @@ exp_manager: save_best_model: True model: - restore_from_path: ??? # Path to a trained T5 .nemo file + restore_from_path: null # Path to a trained T5 .nemo file + pretrained_checkpoint: + checkpoint_dir: null # Path to a folder that contains a .ckpt file + checkpoint_name: null # Name of the .ckpt file within the checkpoint_dir. + hparams_file: null # Path to a .yaml file that contains the hyperparameters of the checkpoint. 
tensor_model_parallel_size: 1 pipeline_model_parallel_size: 1 pipeline_model_parallel_split_rank: 0 diff --git a/examples/nlp/language_modeling/conf/megatron_t5_config_finetune_glue_xnli.yaml b/examples/nlp/language_modeling/conf/megatron_t5_config_finetune_glue_xnli.yaml index 1b08bc37246e..486a6da14135 100644 --- a/examples/nlp/language_modeling/conf/megatron_t5_config_finetune_glue_xnli.yaml +++ b/examples/nlp/language_modeling/conf/megatron_t5_config_finetune_glue_xnli.yaml @@ -37,9 +37,13 @@ exp_manager: save_best_model: True model: - restore_from_path: ??? + restore_from_path: null # Path to a trained T5 .nemo file + pretrained_checkpoint: + checkpoint_dir: null # Path to a folder that contains a .ckpt file + checkpoint_name: null # Name of the .ckpt file within the checkpoint_dir. + hparams_file: null # Path to a .yaml file that contains the hyperparameters of the checkpoint. tensor_model_parallel_size: 1 - pipeline_model_parallel_size: 2 + pipeline_model_parallel_size: 1 pipeline_model_parallel_split_rank: 1 gradient_as_bucket_view: True # Allocate gradients in a contiguous bucket to save memory (less fragmentation and buffer memory) resume_from_checkpoint: null diff --git a/examples/nlp/language_modeling/conf/megatron_t5_finetune.yaml b/examples/nlp/language_modeling/conf/megatron_t5_finetune.yaml index 9a5cf15cfe74..8c383aad9c78 100644 --- a/examples/nlp/language_modeling/conf/megatron_t5_finetune.yaml +++ b/examples/nlp/language_modeling/conf/megatron_t5_finetune.yaml @@ -36,7 +36,11 @@ exp_manager: save_best_model: True model: - restore_from_path: ??? # Path to a trained T5 .nemo file + restore_from_path: null # Path to a trained T5 .nemo file + pretrained_checkpoint: + checkpoint_dir: null # Path to a folder that contains a .ckpt file + checkpoint_name: null # Name of the .ckpt file within the checkpoint_dir. + hparams_file: null # Path to a .yaml file that contains the hyperparameters of the checkpoint. tensor_model_parallel_size: 1 pipeline_model_parallel_size: 1 pipeline_model_parallel_split_rank: 0 diff --git a/examples/nlp/language_modeling/megatron_t5_seq2seq_eval.py b/examples/nlp/language_modeling/megatron_t5_seq2seq_eval.py index 25fd84d800d4..e78d34adee65 100644 --- a/examples/nlp/language_modeling/megatron_t5_seq2seq_eval.py +++ b/examples/nlp/language_modeling/megatron_t5_seq2seq_eval.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from megatron_t5_seq2seq_finetune import load_from_checkpoint_dir, load_from_nemo, validate_checkpoint_loading_args from omegaconf.omegaconf import OmegaConf, open_dict from pytorch_lightning import Trainer from pytorch_lightning.callbacks.timer import Timer @@ -21,17 +22,51 @@ from nemo.collections.nlp.models.language_modeling.megatron_finetune_model import MegatronT5FinetuneModel from nemo.collections.nlp.models.language_modeling.megatron_glue_model import MegatronT5GLUEModel from nemo.collections.nlp.models.language_modeling.megatron_t0_model import MegatronT0Model -from nemo.collections.nlp.parts.nlp_overrides import ( - GradScaler, - MegatronHalfPrecisionPlugin, - NLPDDPStrategy, - NLPSaveRestoreConnector, -) +from nemo.collections.nlp.parts.nlp_overrides import GradScaler, MegatronHalfPrecisionPlugin, NLPDDPStrategy from nemo.core.config import hydra_runner from nemo.utils import logging from nemo.utils.exp_manager import StatelessTimer, exp_manager +def _modify_config(t5_cfg, cfg, add_cfg_to_tree=False): + """ + This function modifies the original t5 pre-training config (t5_cfg) with attributes from the finetuning config (cfg). + The `add_cfg_to_tree` arg adds `cfg` to the top of the yaml tree which is needed for all `hparams.yaml` files when passed as an arg to `load_from_checkpoint()`. + """ + OmegaConf.set_struct(t5_cfg, True) + with open_dict(t5_cfg): + t5_cfg.precision = cfg.trainer.precision + # Overwrite data configs + if cfg.model.data.validation_ds.get('src_file_name', None) is not None: + logging.info( + 'Found validation_ds.src_file_name in the config file. Overriding the finetuned model config file with the values from the new config file.' + ) + t5_cfg.data.validation_ds.src_file_name = cfg.model.data.validation_ds.src_file_name + if cfg.model.data.validation_ds.get('tgt_file_name', None) is not None: + logging.info( + 'Found validation_ds.tgt_file_name in the config file. Overriding the finetuned model config file with the values from the new config file.' + ) + t5_cfg.data.validation_ds.tgt_file_name = cfg.model.data.validation_ds.tgt_file_name + + if "write_predictions_to_file" in cfg.model.data.validation_ds: + t5_cfg.data.validation_ds.write_predictions_to_file = ( + cfg.model.data.validation_ds.write_predictions_to_file + ) + if "output_file_path_prefix" in cfg.model.data.validation_ds: + t5_cfg.data.validation_ds.output_file_path_prefix = cfg.model.data.validation_ds.output_file_path_prefix + + t5_cfg.data.validation_ds.micro_batch_size = cfg.model.data.validation_ds.micro_batch_size + t5_cfg.data.validation_ds.global_batch_size = cfg.model.data.validation_ds.global_batch_size + + # This is needed when modifying a hparam file directly to load `.ckpt` files. + # This is not needed to modify the cfg in `.nemo` files. + if add_cfg_to_tree: + OmegaConf.resolve(t5_cfg) + t5_cfg.cfg = t5_cfg + + return t5_cfg + + @hydra_runner(config_path="conf", config_name="megatron_t5_config_finetune_glue_eval") def main(cfg) -> None: logging.info("\n\n************** Experiment configuration ***********") @@ -69,59 +104,33 @@ def main(cfg) -> None: if isinstance(callback, Timer): trainer.callbacks[idx] = StatelessTimer(cfg.trainer.max_time,) - t5_cfg = MegatronT5GLUEModel.restore_from( - restore_path=cfg.model.restore_from_path, trainer=trainer, return_config=True - ) - - # Override the T5 configuration with the one from the config file. - # NOTE: Only data can be overriden here since this the file being restored here should already correspond to a GLUE/XNLI finetuned model. 
- OmegaConf.set_struct(t5_cfg, True) - with open_dict(t5_cfg): - t5_cfg.precision = cfg.trainer.precision - # Overwrite data configs - if cfg.model.data.validation_ds.get('src_file_name', None) is not None: - logging.info( - 'Found validation_ds.src_file_name in the config file. Overriding the finetuned model config file with the values from the new config file.' + if hasattr(cfg.model.data.validation_ds, 'task_name'): + if cfg.model.restore_from_path: + t5_cfg = MegatronT5GLUEModel.restore_from( + restore_path=cfg.model.restore_from_path, trainer=trainer, return_config=True ) - t5_cfg.data.validation_ds.src_file_name = cfg.model.data.validation_ds.src_file_name - if cfg.model.data.validation_ds.get('tgt_file_name', None) is not None: - logging.info( - 'Found validation_ds.tgt_file_name in the config file. Overriding the finetuned model config file with the values from the new config file.' - ) - t5_cfg.data.validation_ds.tgt_file_name = cfg.model.data.validation_ds.tgt_file_name - - if "write_predictions_to_file" in cfg.model.data.validation_ds: - t5_cfg.data.validation_ds.write_predictions_to_file = ( - cfg.model.data.validation_ds.write_predictions_to_file - ) - if "output_file_path_prefix" in cfg.model.data.validation_ds: - t5_cfg.data.validation_ds.output_file_path_prefix = cfg.model.data.validation_ds.output_file_path_prefix - t5_cfg.data.validation_ds.src_file_name = cfg.model.data.validation_ds.src_file_name - - t5_cfg.data.validation_ds.micro_batch_size = cfg.model.data.validation_ds.micro_batch_size - t5_cfg.data.validation_ds.global_batch_size = cfg.model.data.validation_ds.global_batch_size - - if hasattr(cfg.model.data.validation_ds, 'task_name'): - model = MegatronT5GLUEModel.restore_from( - restore_path=cfg.model.restore_from_path, - trainer=trainer, - override_config_path=t5_cfg, - save_restore_connector=NLPSaveRestoreConnector(), - ) - elif hasattr(cfg.model.data.validation_ds, 'file_names'): - model = MegatronT0Model.restore_from( - restore_path=cfg.model.restore_from_path, - trainer=trainer, - override_config_path=t5_cfg, - save_restore_connector=NLPSaveRestoreConnector(), + model = load_from_nemo(MegatronT5GLUEModel, cfg, trainer, t5_cfg, modify_confg_fn=_modify_config) + else: + validate_checkpoint_loading_args(cfg.model.pretrained_checkpoint) + model = load_from_checkpoint_dir(MegatronT5GLUEModel, cfg, trainer, modify_confg_fn=_modify_config) + elif hasattr(cfg.model.data.validation_ds, 'file_names'): + if cfg.model.restore_from_path: + t5_cfg = MegatronT0Model.restore_from( + restore_path=cfg.model.restore_from_path, trainer=trainer, return_config=True ) + model = load_from_nemo(MegatronT0Model, cfg, trainer, t5_cfg, modify_confg_fn=_modify_config) else: - model = MegatronT5FinetuneModel.restore_from( - restore_path=cfg.model.restore_from_path, - trainer=trainer, - override_config_path=t5_cfg, - save_restore_connector=NLPSaveRestoreConnector(), + validate_checkpoint_loading_args(cfg.model.pretrained_checkpoint) + model = load_from_checkpoint_dir(MegatronT0Model, cfg, trainer, modify_confg_fn=_modify_config) + else: + if cfg.model.restore_from_path: + t5_cfg = MegatronT5FinetuneModel.restore_from( + restore_path=cfg.model.restore_from_path, trainer=trainer, return_config=True ) + model = load_from_nemo(MegatronT5FinetuneModel, cfg, trainer, modify_confg_fn=_modify_config) + else: + validate_checkpoint_loading_args(cfg.model.pretrained_checkpoint) + model = load_from_checkpoint_dir(MegatronT5FinetuneModel, cfg, trainer, modify_confg_fn=_modify_config) model.freeze() 
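    # [Editor's note] freeze() is NeMo's standard pre-inference step before trainer.validate():
    # it turns off gradients and switches the model to eval mode. A rough, hedged sketch of the
    # equivalent for a plain torch module (the real implementation lives in NeMo's core classes):
    #
    #     for p in model.parameters():
    #         p.requires_grad = False
    #     model.eval()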
trainer.validate(model) diff --git a/examples/nlp/language_modeling/megatron_t5_seq2seq_finetune.py b/examples/nlp/language_modeling/megatron_t5_seq2seq_finetune.py index 22883657736f..84b78739f673 100644 --- a/examples/nlp/language_modeling/megatron_t5_seq2seq_finetune.py +++ b/examples/nlp/language_modeling/megatron_t5_seq2seq_finetune.py @@ -12,6 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os +import tempfile + from omegaconf.omegaconf import OmegaConf, open_dict from pytorch_lightning import Trainer from pytorch_lightning.callbacks.timer import Timer @@ -21,6 +24,7 @@ from nemo.collections.nlp.models.language_modeling.megatron_finetune_model import MegatronT5FinetuneModel from nemo.collections.nlp.models.language_modeling.megatron_glue_model import MegatronT5GLUEModel from nemo.collections.nlp.models.language_modeling.megatron_t0_model import MegatronT0Model +from nemo.collections.nlp.modules.common.megatron.megatron_init import fake_initialize_model_parallel from nemo.collections.nlp.parts.nlp_overrides import ( GradScaler, MegatronHalfPrecisionPlugin, @@ -29,8 +33,99 @@ PipelineMixedPrecisionPlugin, ) from nemo.core.config import hydra_runner -from nemo.utils import logging +from nemo.utils import AppState, logging from nemo.utils.exp_manager import StatelessTimer, exp_manager +from nemo.utils.model_utils import inject_model_parallel_rank + + +def _modify_config(t5_cfg, cfg, add_cfg_to_tree=False): + """ + This function modifies the original t5 pre-training config (t5_cfg) with attributes from the finetuning config (cfg). + The `add_cfg_to_tree` arg adds `cfg` to the top of the yaml tree which is needed for all `hparams.yaml` files when passed as an arg to `load_from_checkpoint()`. + """ + OmegaConf.set_struct(t5_cfg, True) + with open_dict(t5_cfg): + t5_cfg.megatron_amp_O2 = cfg.model.get('megatron_amp_O2', False) + if hasattr(t5_cfg, 'encoder') and hasattr(t5_cfg, 'decoder'): + t5_cfg.encoder.masked_softmax_fusion = False + t5_cfg.decoder.masked_softmax_fusion = False + t5_cfg.encoder.hidden_dropout = cfg.model.get('hidden_dropout', 0.1) + t5_cfg.decoder.hidden_dropout = cfg.model.get('hidden_dropout', 0.1) + if hasattr(t5_cfg.encoder, 'ffn_dropout'): + t5_cfg.encoder.ffn_dropout = cfg.model.get('ffn_dropout', 0.1) + if hasattr(t5_cfg.decoder, 'ffn_dropout'): + t5_cfg.decoder.ffn_dropout = cfg.model.get('ffn_dropout', 0.1) + else: + t5_cfg.hidden_dropout = cfg.model.get('hidden_dropout', 0.1) + t5_cfg.attention_dropout = cfg.model.get('attention_dropout', 0.1) + t5_cfg.masked_softmax_fusion = False + t5_cfg.data = cfg.model.data + t5_cfg.precision = cfg.trainer.precision + t5_cfg.optim = cfg.model.optim + t5_cfg.micro_batch_size = cfg.model.data.train_ds.micro_batch_size + t5_cfg.global_batch_size = cfg.model.data.train_ds.global_batch_size + # XNLI has eval languages in the yaml config. + if hasattr(cfg.model, 'eval_languages'): + t5_cfg.eval_languages = cfg.model.eval_languages + + # This is needed when modifying a hparam file directly to load `.ckpt` files. + # This is not needed to modify the cfg in `.nemo` files. 
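    # [Editor's note] A minimal, hypothetical sketch of the trick applied below: PTL's
    # load_from_checkpoint() reads hyperparameters from an hparams.yaml file, and NeMo models
    # expect their config under a top-level `cfg` key, so the tree is made to contain itself
    # before being saved back out (all names here are illustrative):
    #
    #     from omegaconf import OmegaConf
    #     conf = OmegaConf.create({'hidden_size': 768})  # stands in for the model config
    #     OmegaConf.resolve(conf)                        # bake in interpolations first
    #     conf.cfg = conf                                # expose the config under a `cfg` key
    #     OmegaConf.save(config=conf, f='hparams_patched.yaml')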
+ if add_cfg_to_tree: + OmegaConf.resolve(t5_cfg) + t5_cfg.cfg = t5_cfg + + return t5_cfg + + +def load_from_nemo(cls, cfg, trainer, t5_cfg, modify_confg_fn): + t5_cfg = modify_confg_fn(t5_cfg, cfg, add_cfg_to_tree=False) + model = cls.restore_from( + restore_path=cfg.model.restore_from_path, + trainer=trainer, + override_config_path=t5_cfg, + save_restore_connector=NLPSaveRestoreConnector(), + ) + return model + + +def load_from_checkpoint_dir(cls, cfg, trainer, modify_confg_fn): + app_state = AppState() + if cfg.model.tensor_model_parallel_size > 1 or cfg.model.pipeline_model_parallel_size > 1: + app_state.model_parallel_size = cfg.model.tensor_model_parallel_size * cfg.model.pipeline_model_parallel_size + app_state.tensor_model_parallel_size = cfg.model.tensor_model_parallel_size + app_state.pipeline_model_parallel_size = cfg.model.pipeline_model_parallel_size + ( + app_state.tensor_model_parallel_rank, + app_state.pipeline_model_parallel_rank, + app_state.model_parallel_size, + app_state.data_parallel_size, + app_state.pipeline_model_parallel_split_rank, + app_state.virtual_pipeline_model_parallel_rank, + ) = fake_initialize_model_parallel( + world_size=app_state.model_parallel_size, + rank=trainer.global_rank, + tensor_model_parallel_size_=cfg.model.tensor_model_parallel_size, + pipeline_model_parallel_size_=cfg.model.pipeline_model_parallel_size, + pipeline_model_parallel_split_rank_=cfg.model.pipeline_model_parallel_split_rank, + ) + checkpoint_path = inject_model_parallel_rank( + os.path.join(cfg.model.pretrained_checkpoint.checkpoint_dir, cfg.model.pretrained_checkpoint.checkpoint_name) + ) + hparams_file = OmegaConf.load(cfg.model.pretrained_checkpoint.hparams_file) + t5_cfg = modify_confg_fn(hparams_file.cfg, cfg, add_cfg_to_tree=True) + with tempfile.NamedTemporaryFile(suffix='.yaml') as f: + OmegaConf.save(config=t5_cfg, f=f.name) + model = cls.load_from_checkpoint(checkpoint_path=checkpoint_path, trainer=trainer, hparams_file=f.name,) + return model + + +def validate_checkpoint_loading_args(cfg): + if cfg.checkpoint_dir is None or not os.path.isdir(cfg.checkpoint_dir): + raise ValueError(f'Checkpoint directory {cfg.checkpoint_dir} does not exist or is not a directory.') + if cfg.checkpoint_name is None: + raise ValueError(f'Checkpoint name {cfg.checkpoint_name} is not valid.') + if cfg.hparams_file is None or not os.path.isfile(cfg.hparams_file): + raise ValueError(f'Hparams file {cfg.hparams_file} does not exist or is not a file.') @hydra_runner(config_path="conf", config_name="megatron_t5_config_finetune_glue_mnli") @@ -78,58 +173,35 @@ def main(cfg) -> None: if isinstance(callback, Timer): trainer.callbacks[idx] = StatelessTimer(cfg.trainer.max_time,) - # Get the T5 Base configuration. - t5_cfg = MegatronT5FinetuneModel.restore_from( - restore_path=cfg.model.restore_from_path, trainer=trainer, return_config=True - ) - - # Override the T5 configuration with the one from the config file. 
- OmegaConf.set_struct(t5_cfg, True) - with open_dict(t5_cfg): - t5_cfg.megatron_amp_O2 = cfg.model.get('megatron_amp_O2', False) - if hasattr(t5_cfg, 'encoder') and hasattr(t5_cfg, 'decoder'): - t5_cfg.encoder.masked_softmax_fusion = False - t5_cfg.decoder.masked_softmax_fusion = False - t5_cfg.encoder.hidden_dropout = cfg.model.get('hidden_dropout', 0.1) - t5_cfg.decoder.hidden_dropout = cfg.model.get('hidden_dropout', 0.1) - if hasattr(t5_cfg.encoder, 'ffn_dropout'): - t5_cfg.encoder.ffn_dropout = cfg.model.get('ffn_dropout', 0.1) - if hasattr(t5_cfg.decoder, 'ffn_dropout'): - t5_cfg.decoder.ffn_dropout = cfg.model.get('ffn_dropout', 0.1) - else: - t5_cfg.hidden_dropout = cfg.model.get('hidden_dropout', 0.1) - t5_cfg.attention_dropout = cfg.model.get('attention_dropout', 0.1) - t5_cfg.masked_softmax_fusion = False - t5_cfg.data = cfg.model.data - t5_cfg.precision = cfg.trainer.precision - t5_cfg.optim = cfg.model.optim - t5_cfg.micro_batch_size = cfg.model.data.train_ds.micro_batch_size - t5_cfg.global_batch_size = cfg.model.data.train_ds.global_batch_size - # XNLI has eval languages in the yaml config. - if hasattr(cfg.model, 'eval_languages'): - t5_cfg.eval_languages = cfg.model.eval_languages - if hasattr(cfg.model.data.train_ds, 'task_name'): - model = MegatronT5GLUEModel.restore_from( - restore_path=cfg.model.restore_from_path, - trainer=trainer, - override_config_path=t5_cfg, - save_restore_connector=NLPSaveRestoreConnector(), - ) + if cfg.model.restore_from_path: + t5_cfg = MegatronT5GLUEModel.restore_from( + restore_path=cfg.model.restore_from_path, trainer=trainer, return_config=True + ) + model = load_from_nemo(MegatronT5GLUEModel, cfg, trainer, t5_cfg, modify_confg_fn=_modify_config) + else: + validate_checkpoint_loading_args(cfg.model.pretrained_checkpoint) + model = load_from_checkpoint_dir(MegatronT5GLUEModel, cfg, trainer, modify_confg_fn=_modify_config) elif hasattr(cfg.model.data.train_ds, 'file_names'): - model = MegatronT0Model.restore_from( - restore_path=cfg.model.restore_from_path, - trainer=trainer, - override_config_path=t5_cfg, - save_restore_connector=NLPSaveRestoreConnector(), - ) + if cfg.model.restore_from_path: + t5_cfg = MegatronT0Model.restore_from( + restore_path=cfg.model.restore_from_path, trainer=trainer, return_config=True + ) + model = load_from_nemo(MegatronT0Model, cfg, trainer, t5_cfg, modify_confg_fn=_modify_config) + else: + validate_checkpoint_loading_args(cfg.model.pretrained_checkpoint) + model = load_from_checkpoint_dir(MegatronT0Model, cfg, trainer, t5_cfg, modify_confg_fn=_modify_config) else: - model = MegatronT5FinetuneModel.restore_from( - restore_path=cfg.model.restore_from_path, - trainer=trainer, - override_config_path=t5_cfg, - save_restore_connector=NLPSaveRestoreConnector(), - ) + if cfg.model.restore_from_path: + t5_cfg = MegatronT5FinetuneModel.restore_from( + restore_path=cfg.model.restore_from_path, trainer=trainer, return_config=True + ) + model = load_from_nemo(MegatronT5FinetuneModel, cfg, trainer, t5_cfg, modify_confg_fn=_modify_config) + else: + validate_checkpoint_loading_args(cfg.model.pretrained_checkpoint) + model = load_from_checkpoint_dir( + MegatronT5FinetuneModel, cfg, trainer, t5_cfg, modify_confg_fn=_modify_config + ) trainer.fit(model) trainer.validate(model) diff --git a/nemo/collections/nlp/models/language_modeling/megatron_finetune_model.py b/nemo/collections/nlp/models/language_modeling/megatron_finetune_model.py index e29cea2264b6..941048304f6a 100644 --- 
a/nemo/collections/nlp/models/language_modeling/megatron_finetune_model.py
+++ b/nemo/collections/nlp/models/language_modeling/megatron_finetune_model.py
@@ -132,22 +132,9 @@ def setup(self, stage=None):
             self.setup_training_data()
 
     def _process_global_batch(self, global_batch):
-        """Process a list of microbatches into a global batch."""
-        # If there is no language information in the global batch (ex: English MNLI), we can use the parent global batch processor as is.
-        if 'lang' not in global_batch[0]:
-            return self._process_global_batch_without_megatron_batch_sampler(global_batch)
-
-        # For validation data (XNLI), we need to process the global batch and and then deal with language info separately.
-        else:
-            assert all(['lang' in micro_batch for micro_batch in global_batch])
-            langs_list = []
-            processed_global_batch = self._process_global_batch_without_megatron_batch_sampler(
-                [{k: v for k, v in micro_batch.items() if k != 'lang'} for micro_batch in global_batch]
-            )
-            for micro_batch in global_batch:
-                langs_list.extend(micro_batch['lang'])
-            processed_global_batch['lang'] = langs_list
-            return processed_global_batch
+        """Optionally processes a global batch."""
+        # TODO: maybe remove this now that we've refactored data batch sizes.
+        return global_batch
 
     def on_validation_epoch_start(self):
         app_state = AppState()
@@ -160,7 +147,26 @@ def on_validation_epoch_start(self):
         )
         return super().on_validation_epoch_start()
 
+    def on_test_epoch_start(self):
+        app_state = AppState()
+        _reconfigure_microbatch_calculator(
+            rank=app_state.global_rank,
+            rampup_batch_size=None,
+            global_batch_size=self.cfg.data.test_ds.global_batch_size,
+            micro_batch_size=self.cfg.data.test_ds.micro_batch_size,
+            data_parallel_size=parallel_state.get_data_parallel_world_size(),
+        )
+        return super().on_test_epoch_start()
+
+    def on_test_epoch_end(self):
+        self.on_inference_epoch_end(self.cfg.data.test_ds)
+        return super().on_test_epoch_end()
+
     def on_validation_epoch_end(self):
+        self.on_inference_epoch_end(self.cfg.data.validation_ds)
+        return super().on_validation_epoch_end()
+
+    def on_inference_epoch_end(self, ds):
         app_state = AppState()
         if hasattr(self, "_train_ds"):
             _reconfigure_microbatch_calculator(
@@ -176,30 +182,32 @@ def on_validation_epoch_end(self):
             _reconfigure_microbatch_calculator(
                 rank=app_state.global_rank,
                 rampup_batch_size=None,
-                global_batch_size=self.cfg.data.validation_ds.global_batch_size,
-                micro_batch_size=self.cfg.data.validation_ds.micro_batch_size,
+                global_batch_size=ds.global_batch_size,
+                micro_batch_size=ds.micro_batch_size,
                 data_parallel_size=parallel_state.get_data_parallel_world_size(),
             )
-        return super().on_validation_epoch_end()
 
+    def on_train_epoch_start(self) -> None:
+        # Same logic as validation epoch end, but this may be needed if there is no validation sanity check to trigger validation_epoch_end()
+        self.on_validation_epoch_end()
+        return super().on_train_epoch_start()
 
     def training_step(self, batch, batch_idx):
-        micro_batch_size = batch[0]['text_enc'].size(0)
+        global_batch_size_per_gpu = batch['text_enc'].size(0)
         # This should happen only on the last batch of the dataset.
-        if micro_batch_size != self.cfg.data.train_ds.micro_batch_size:
+        if (
+            global_batch_size_per_gpu
+            != self.cfg.data.train_ds.global_batch_size // parallel_state.get_data_parallel_world_size()
+        ):
+            # NOTE: This should never really be called since `drop_last=True` is required for training datasets.
app_state = AppState() _reconfigure_microbatch_calculator( rank=app_state.global_rank, rampup_batch_size=None, - global_batch_size=micro_batch_size - * parallel_state.get_data_parallel_world_size() - * get_num_microbatches(), - micro_batch_size=micro_batch_size, + global_batch_size=global_batch_size_per_gpu * parallel_state.get_data_parallel_world_size(), + micro_batch_size=global_batch_size_per_gpu // get_num_microbatches(), data_parallel_size=parallel_state.get_data_parallel_world_size(), ) - # At this point batch is a list of dictionaries where eatch dict is a microbatch. - # After the process_global_batch call, batch will be a single dictionary containing the global batch. - # This is required since the parent class expects a single global batch dictioanry. batch = self._process_global_batch(batch) return super().training_step(batch, batch_idx) @@ -258,27 +266,30 @@ def cast_for_metric(self, pred, label, metric_name, class_labels=None, labels_ar return pred, label - def inference_step(self, batch, batch_idx, mode, dataloader_idx=0): - batch_has_lang_information = len(batch[0]) == 7 - - micro_batch_size = batch[0]['text_enc'].size(0) + def _reconfigure_and_process_inference_batch(self, batch, ds_config): + global_batch_size_per_gpu = batch['text_enc'].size(0) # This should happen only on the last batch of the dataset. - if micro_batch_size != self.cfg.data.validation_ds.micro_batch_size: + if global_batch_size_per_gpu != ds_config.global_batch_size // parallel_state.get_data_parallel_world_size(): + # NOTE: This is reconfiguring to make sure there is no grad-acc for validation batches. app_state = AppState() _reconfigure_microbatch_calculator( rank=app_state.global_rank, rampup_batch_size=None, - global_batch_size=micro_batch_size - * parallel_state.get_data_parallel_world_size() - * get_num_microbatches(), - micro_batch_size=micro_batch_size, + global_batch_size=global_batch_size_per_gpu * parallel_state.get_data_parallel_world_size(), + micro_batch_size=global_batch_size_per_gpu, data_parallel_size=parallel_state.get_data_parallel_world_size(), ) - # At this point processed_batch is a list of dictionaries where eatch dict is a microbatch. - # After the process_global_batch call, processed_batch will be a single dictionary containing the global batch. - # This is required since the parent class expects a single global batch dictioanry. processed_batch = self._process_global_batch(batch) + return processed_batch + + def inference_step(self, batch, batch_idx, mode, dataloader_idx=0): + # Regular finetuning datasets will return a list of dicts for each microbatch. But T0 datasets will return a single dict for the global batch. + batch_has_lang_information = isinstance(batch, list) and len(batch[0]) == 7 + + processed_batch = self._reconfigure_and_process_inference_batch( + batch, self.cfg.data.validation_ds if mode == 'validation' else self.cfg.data.test_ds + ) # Call parent validation step to get the loss. # NOTE: There could be extra keys in the processed_batch dictionary such as "langs" for XNLI, this will be ignored in the parent class. 
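[Editor's note] Every _reconfigure_microbatch_calculator call in this patch maintains the one
invariant apex's microbatch calculator enforces: global_batch_size == micro_batch_size *
num_microbatches * data_parallel_size. A hedged, framework-free sketch of the arithmetic these
hunks rely on:

    def microbatch_math(global_batch_size, micro_batch_size, data_parallel_size):
        per_gpu = global_batch_size // data_parallel_size  # samples each rank sees per step
        num_microbatches = per_gpu // micro_batch_size     # gradient-accumulation steps
        return per_gpu, num_microbatches

Setting micro_batch_size equal to the whole per-GPU batch, as the inference path above does,
collapses num_microbatches to 1, i.e. no gradient accumulation during validation or test.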
@@ -306,8 +317,12 @@ def inference_step(self, batch, batch_idx, mode, dataloader_idx=0): pred=pred, label=label, metric_name=self.val_metric_name if mode == 'validation' else self.test_metric_name, - class_labels=self.cfg.data.validation_ds.metric.get('class_labels', None), - labels_are_strings=self.cfg.data.validation_ds.metric.get('labels_are_strings', False), + class_labels=self.cfg.data.validation_ds.metric.get('class_labels', None) + if mode == 'validation' + else self.cfg.data.test_ds.metric.get('class_labels', None), + labels_are_strings=self.cfg.data.validation_ds.metric.get('labels_are_strings', False) + if mode == 'validation' + else self.cfg.data.test_ds.metric.get('labels_are_strings', False), ) if batch_has_lang_information: _ = metric(pred, label, category) @@ -497,15 +512,7 @@ def test_epoch_end(self, outputs): _ = self.inference_epoch_end(outputs, 'test', self.cfg.data.test_ds) def build_data_loader( - self, - dataset, - micro_batch_size, - global_batch_size, - shuffle, - num_workers, - pin_memory, - drop_last, - check_validation_interval, + self, dataset, global_batch_size, shuffle, num_workers, pin_memory, drop_last, ): """Buld dataloader given an input dataset.""" @@ -517,20 +524,6 @@ def build_data_loader( sampler = torch.utils.data.distributed.DistributedSampler( dataset, num_replicas=world_size, rank=rank, shuffle=shuffle ) - # This check makes sure the val_check_interval is less than the number of global batches. - # Normally, PTL would do this check and properly account for gradient accumulation. - # But now, it is implicit in the apex fwd/bwd functions and so we need to check for this somewhere. - # The consequence of not doing this is that training loop will never run validation. - # NOTE: Prog bar is also broken as a result of this. 
- global_batch_size_per_gpu = micro_batch_size * get_num_microbatches() - if ( - self.trainer.val_check_interval > (sampler.num_samples // global_batch_size_per_gpu) - and check_validation_interval - ): - raise ValueError( - f"trainer.val_check_interval {self.trainer.val_check_interval} is > number of global batches {sampler.num_samples // global_batch_size}" - ) - if isinstance(dataset, ConcatMapDataset): collate_fn = dataset.datasets[0].collate_fn else: @@ -540,7 +533,7 @@ def build_data_loader( dataset, collate_fn=collate_fn, sampler=sampler, - batch_size=micro_batch_size, + batch_size=global_batch_size // parallel_state.get_data_parallel_world_size(), num_workers=num_workers, pin_memory=pin_memory, drop_last=drop_last, @@ -549,13 +542,11 @@ def build_data_loader( def setup_training_data(self): self._train_dl = self.build_data_loader( self._train_ds, - micro_batch_size=self.cfg.data.train_ds.micro_batch_size, global_batch_size=self.cfg.data.train_ds.global_batch_size, shuffle=self.cfg.data.train_ds.shuffle, num_workers=self.cfg.data.train_ds.num_workers, pin_memory=self.cfg.data.train_ds.pin_memory, drop_last=self.cfg.data.train_ds.drop_last, - check_validation_interval=True, ) def setup_eval_data(self, datasets, data_cfg): @@ -563,13 +554,11 @@ def setup_eval_data(self, datasets, data_cfg): for dataset in datasets: eval_dl = self.build_data_loader( dataset, - micro_batch_size=data_cfg.micro_batch_size, global_batch_size=data_cfg.global_batch_size, shuffle=data_cfg.shuffle, num_workers=data_cfg.num_workers, pin_memory=data_cfg.pin_memory, drop_last=data_cfg.drop_last, - check_validation_interval=False, ) dataloaders.append(eval_dl) return dataloaders @@ -681,15 +670,3 @@ def build_train_valid_test_datasets(self, stage): return self._train_ds = self._build_train_dataset(self.cfg.data.train_ds) logging.info(f'Finished building datasets ...') - - def on_train_start(self) -> None: - """PTL hook used to override DataFetcher with GlobalBatchDataFetcher """ - self.trainer.fit_loop._data_fetcher = GlobalBatchDataFetcher() - - def on_validation_start(self) -> None: - """PTL hook used to override DataFetcher with GlobalBatchDataFetcher """ - self.trainer.fit_loop.epoch_loop.val_loop._data_fetcher = GlobalBatchDataFetcher() - self.trainer.validate_loop._data_fetcher = GlobalBatchDataFetcher() - - def on_test_start(self) -> None: - self.trainer.test_loop._data_fetcher = GlobalBatchDataFetcher() From 109fa13e546baed92c34a2176a57e170fbd96eb3 Mon Sep 17 00:00:00 2001 From: David Date: Tue, 22 Nov 2022 19:05:40 -0700 Subject: [PATCH 196/244] export_utils bugfix (#5480) * updated export_utils Signed-off-by: David Mosallanezhad * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Signed-off-by: David Mosallanezhad Co-authored-by: David Mosallanezhad Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- nemo/utils/export_utils.py | 46 +++++++------------------------------- 1 file changed, 8 insertions(+), 38 deletions(-) diff --git a/nemo/utils/export_utils.py b/nemo/utils/export_utils.py index 197d3b478167..e4fda73c181d 100644 --- a/nemo/utils/export_utils.py +++ b/nemo/utils/export_utils.py @@ -59,42 +59,6 @@ def forward(self, x): return F.linear(x, self.weight, self.bias), None -class ExportableMatchedScaleMaskSoftmax(nn.Module): - def __init__(self, mod): - super(ExportableMatchedScaleMaskSoftmax, self).__init__() - self.init_module(mod.input_in_fp16, mod.input_in_bf16, mod.mask_func, mod.softmax_in_fp32, 
mod.scale) - - def init_module( - self, input_in_fp16, input_in_bf16, mask_func, softmax_in_fp32, scale, - ): - self.input_in_fp16 = input_in_fp16 - self.input_in_bf16 = input_in_bf16 - self.softmax_in_fp32 = softmax_in_fp32 - self.mask_func = mask_func - self.scale = scale - - self.input_in_float16 = self.input_in_fp16 or self.input_in_bf16 - - def forward(self, input, mask): - if self.input_in_float16 and self.softmax_in_fp32: - input = input.float() - - if self.scale is not None: - input = input * self.scale - mask_output = self.mask_func(input, mask) if mask is not None else input - probs = torch.nn.Softmax(dim=-1)(mask_output) - all_k_masked = mask.all(axis=-1) - zero_attention_mask = (1.0 - all_k_masked.float())[:, :, :, None] - probs = probs * zero_attention_mask - - if self.input_in_float16 and self.softmax_in_fp32: - if self.input_in_fp16: - probs = probs.half() - else: - probs = probs.bfloat16() - return probs - - def get_export_format(filename: str): _, ext = os.path.splitext(filename) try: @@ -367,7 +331,13 @@ def replace_MatchedScaleMaskSoftmax(n: nn.Module) -> Optional[nn.Linear]: exportable module """ - mod = ExportableMatchedScaleMaskSoftmax(n.input_in_fp16, n.input_in_bf16, n.mask_func, n.softmax_in_fp32, n.scale) + # including the import here to avoid circular imports + from nemo.collections.nlp.modules.common.megatron.fused_softmax import MatchedScaleMaskSoftmax + + # disabling fusion for the MatchedScaleMaskSoftmax + mod = MatchedScaleMaskSoftmax( + n.input_in_fp16, n.input_in_bf16, n.attn_mask_type, False, n.mask_func, n.softmax_in_fp32, n.scale + ) return mod @@ -440,7 +410,7 @@ def script_module(m: nn.Module): "BatchNorm1d": wrap_module(nn.BatchNorm1d, CastToFloat), "BatchNorm2d": wrap_module(nn.BatchNorm2d, CastToFloat), "LayerNorm": wrap_module(nn.LayerNorm, CastToFloat), - "MatchedScaleMaskSoftmax": wrap_module(nn.Softmax, ExportableMatchedScaleMaskSoftmax), + "MatchedScaleMaskSoftmax": wrap_module(None, replace_MatchedScaleMaskSoftmax), } script_replacements = { From 10966a1c177a98fa43a1d00d29443a2f73d7608b Mon Sep 17 00:00:00 2001 From: Boris Fomitchev Date: Wed, 23 Nov 2022 15:19:58 -0800 Subject: [PATCH 197/244] Export fixes for Riva (#5496) * Export fixes for Riva Signed-off-by: Boris Fomitchev * Cleaning up training_utils Signed-off-by: Boris Fomitchev Signed-off-by: Boris Fomitchev --- .../parts/submodules/multi_head_attention.py | 8 +-- .../asr/parts/submodules/subsampling.py | 2 +- .../common/parts/training_utils.py | 49 ------------------- nemo/utils/export_utils.py | 4 +- 4 files changed, 7 insertions(+), 56 deletions(-) delete mode 100644 nemo/collections/common/parts/training_utils.py diff --git a/nemo/collections/asr/parts/submodules/multi_head_attention.py b/nemo/collections/asr/parts/submodules/multi_head_attention.py index 8f774e172718..62206fb7d3da 100644 --- a/nemo/collections/asr/parts/submodules/multi_head_attention.py +++ b/nemo/collections/asr/parts/submodules/multi_head_attention.py @@ -37,7 +37,7 @@ import torch import torch.nn as nn -from nemo.collections.common.parts.training_utils import avoid_float16_autocast_context +from nemo.utils import avoid_float16_autocast_context __all__ = [ 'RelPositionMultiHeadAttention', @@ -337,7 +337,6 @@ def extend_pe(self, length, device): # positive positions would be used for left positions and negative for right positions positions = torch.arange(length - 1, -length, -1, dtype=torch.float32, device=device).unsqueeze(1) self.create_pe(positions=positions) - self.center_pos = 
torch.tensor(self.pe.size(1) // 2 + 1, dtype=torch.int32, device=device) def forward(self, x, cache_len=0): """Compute positional encoding. @@ -356,8 +355,9 @@ def forward(self, x, cache_len=0): # negative positions would be used for right and positive for left tokens # for input of length L, 2*L-1 positions are needed, positions from (L-1) to -(L-1) input_len = x.size(1) + cache_len - start_pos = self.center_pos - input_len - end_pos = self.center_pos + input_len - 1 + center_pos = self.pe.size(1) // 2 + 1 + start_pos = center_pos - input_len + end_pos = center_pos + input_len - 1 pos_emb = self.pe[:, start_pos:end_pos] if self.dropout_emb: pos_emb = self.dropout_emb(pos_emb) diff --git a/nemo/collections/asr/parts/submodules/subsampling.py b/nemo/collections/asr/parts/submodules/subsampling.py index ff5b2b2e4686..06bce6bb3d0f 100644 --- a/nemo/collections/asr/parts/submodules/subsampling.py +++ b/nemo/collections/asr/parts/submodules/subsampling.py @@ -19,7 +19,7 @@ from torch.nn import LayerNorm from nemo.collections.asr.parts.submodules.causal_convs import CausalConv2D -from nemo.collections.common.parts.training_utils import avoid_bfloat16_autocast_context +from nemo.utils import avoid_bfloat16_autocast_context class StackingSubsampling(torch.nn.Module): diff --git a/nemo/collections/common/parts/training_utils.py b/nemo/collections/common/parts/training_utils.py deleted file mode 100644 index 4b4b1c3c910b..000000000000 --- a/nemo/collections/common/parts/training_utils.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
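# [Editor's note] The module deleted below is not lost: the same autocast helpers now ship from
# nemo.utils, as the import changes above show. Usage is unchanged; a minimal sketch with the
# new import path (q and k are assumed fp16 tensors inside an fp16 autocast region):
#
#     from nemo.utils import avoid_float16_autocast_context
#
#     with avoid_float16_autocast_context():
#         scores = torch.matmul(q, k.transpose(-2, -1))  # runs in bf16/fp32 rather than fp16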
- -from contextlib import nullcontext - -import torch - -__all__ = ['avoid_bfloat16_autocast_context', 'avoid_float16_autocast_context'] - - -def avoid_bfloat16_autocast_context(): - """ - If the current autocast context is bfloat16, - cast it to float32 - """ - - if torch.is_autocast_enabled() and torch.get_autocast_gpu_dtype() == torch.bfloat16: - return torch.cuda.amp.autocast(dtype=torch.float32) - else: - return nullcontext() - - -def avoid_float16_autocast_context(): - """ - If the current autocast context is float16, cast it to bfloat16 - if available (unless we're in jit) or float32 - """ - - if torch.is_autocast_enabled() and torch.get_autocast_gpu_dtype() == torch.float16: - if torch.jit.is_scripting() or torch.jit.is_tracing(): - return torch.cuda.amp.autocast(dtype=torch.float32) - - if torch.cuda.is_bf16_supported(): - return torch.cuda.amp.autocast(dtype=torch.bfloat16) - else: - return torch.cuda.amp.autocast(dtype=torch.float32) - else: - return nullcontext() diff --git a/nemo/utils/export_utils.py b/nemo/utils/export_utils.py index e4fda73c181d..c89644fd1e68 100644 --- a/nemo/utils/export_utils.py +++ b/nemo/utils/export_utils.py @@ -169,7 +169,7 @@ def run_ts_and_compare(ts_model, ts_input_list, ts_input_dict, output_example, c if torch.is_tensor(expected): tout = out.to('cpu') - logging.debug(f"Checking output {i}, shape: {expected.shape}:\n{expected}\n{tout}") + logging.debug(f"Checking output {i}, shape: {expected.shape}:\n") this_good = True try: if not torch.allclose(tout, expected.cpu(), rtol=check_tolerance, atol=check_tolerance): @@ -191,7 +191,7 @@ def run_ort_and_compare(sess, ort_input, output_example, check_tolerance=0.01): if torch.is_tensor(expected): tout = torch.from_numpy(out) - logging.debug(f"Checking output {i}, shape: {expected.shape}:\n{expected}\n{tout}") + logging.debug(f"Checking output {i}, shape: {expected.shape}:\n") this_good = True try: if not torch.allclose(tout, expected.cpu(), rtol=check_tolerance, atol=100 * check_tolerance): From 0418a1baad6ac5bf10c4b09ea7b7fa8ee0261122 Mon Sep 17 00:00:00 2001 From: David Date: Tue, 29 Nov 2022 12:05:37 -0700 Subject: [PATCH 198/244] minor bug fix (#5521) Signed-off-by: David Mosallanezhad Signed-off-by: David Mosallanezhad Co-authored-by: David Mosallanezhad --- examples/nlp/language_modeling/megatron_t5_seq2seq_eval.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/nlp/language_modeling/megatron_t5_seq2seq_eval.py b/examples/nlp/language_modeling/megatron_t5_seq2seq_eval.py index e78d34adee65..6fe4422c6a76 100644 --- a/examples/nlp/language_modeling/megatron_t5_seq2seq_eval.py +++ b/examples/nlp/language_modeling/megatron_t5_seq2seq_eval.py @@ -127,7 +127,7 @@ def main(cfg) -> None: t5_cfg = MegatronT5FinetuneModel.restore_from( restore_path=cfg.model.restore_from_path, trainer=trainer, return_config=True ) - model = load_from_nemo(MegatronT5FinetuneModel, cfg, trainer, modify_confg_fn=_modify_config) + model = load_from_nemo(MegatronT5FinetuneModel, cfg, trainer, t5_cfg, modify_confg_fn=_modify_config) else: validate_checkpoint_loading_args(cfg.model.pretrained_checkpoint) model = load_from_checkpoint_dir(MegatronT5FinetuneModel, cfg, trainer, modify_confg_fn=_modify_config) From 53dae726c3fea248e798f0ad27301259b9593881 Mon Sep 17 00:00:00 2001 From: David Date: Sun, 4 Dec 2022 22:14:05 -0700 Subject: [PATCH 199/244] added set_start_method + function param bugfix (#5539) * added set_start_method + function param bugfix Signed-off-by: David Mosallanezhad * [pre-commit.ci] 
auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * upper bound torchmetrics Signed-off-by: ericharper Signed-off-by: David Mosallanezhad Signed-off-by: ericharper Co-authored-by: David Mosallanezhad Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: ericharper --- .../language_modeling/megatron_gpt_prompt_learning.py | 2 ++ .../nlp/language_modeling/megatron_t5_prompt_learning.py | 3 +++ .../language_modeling/megatron_t5_seq2seq_finetune.py | 9 +++++---- requirements/requirements_lightning.txt | 2 +- 4 files changed, 11 insertions(+), 5 deletions(-) diff --git a/examples/nlp/language_modeling/megatron_gpt_prompt_learning.py b/examples/nlp/language_modeling/megatron_gpt_prompt_learning.py index ddd6b8eb8d97..067836355bc8 100644 --- a/examples/nlp/language_modeling/megatron_gpt_prompt_learning.py +++ b/examples/nlp/language_modeling/megatron_gpt_prompt_learning.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import torch.multiprocessing as mp from omegaconf.omegaconf import OmegaConf, open_dict from pytorch_lightning import Trainer from pytorch_lightning.callbacks.timer import Timer @@ -30,6 +31,7 @@ from nemo.utils import logging from nemo.utils.exp_manager import StatelessTimer, exp_manager +mp.set_start_method("spawn", force=True) """ This is an example of how to ptune/prompt-tune a pretrained GPT model. diff --git a/examples/nlp/language_modeling/megatron_t5_prompt_learning.py b/examples/nlp/language_modeling/megatron_t5_prompt_learning.py index e91c7c178c94..3b9596aa9d51 100644 --- a/examples/nlp/language_modeling/megatron_t5_prompt_learning.py +++ b/examples/nlp/language_modeling/megatron_t5_prompt_learning.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import torch.multiprocessing as mp from omegaconf.omegaconf import OmegaConf, open_dict from pytorch_lightning import Trainer from pytorch_lightning.callbacks.timer import Timer @@ -30,6 +31,8 @@ from nemo.utils import logging from nemo.utils.exp_manager import StatelessTimer, exp_manager +mp.set_start_method("spawn", force=True) + """ This is an example of how to ptune/prompt-tune a pretrained T5 model. 
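[Editor's note] The set_start_method("spawn", force=True) lines added in this commit run at
module import time, before any CUDA context exists, because fork-started subprocesses cannot
re-initialize CUDA. A minimal sketch of the pattern the three scripts now share (main() is
illustrative):

    import torch.multiprocessing as mp

    mp.set_start_method("spawn", force=True)  # force=True overrides a previously set method

    if __name__ == '__main__':
        main()  # subprocesses (e.g. dataloader workers) are now spawned, not forked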
diff --git a/examples/nlp/language_modeling/megatron_t5_seq2seq_finetune.py b/examples/nlp/language_modeling/megatron_t5_seq2seq_finetune.py index 84b78739f673..70f3e2bbb252 100644 --- a/examples/nlp/language_modeling/megatron_t5_seq2seq_finetune.py +++ b/examples/nlp/language_modeling/megatron_t5_seq2seq_finetune.py @@ -15,6 +15,7 @@ import os import tempfile +import torch.multiprocessing as mp from omegaconf.omegaconf import OmegaConf, open_dict from pytorch_lightning import Trainer from pytorch_lightning.callbacks.timer import Timer @@ -37,6 +38,8 @@ from nemo.utils.exp_manager import StatelessTimer, exp_manager from nemo.utils.model_utils import inject_model_parallel_rank +mp.set_start_method("spawn", force=True) + def _modify_config(t5_cfg, cfg, add_cfg_to_tree=False): """ @@ -190,7 +193,7 @@ def main(cfg) -> None: model = load_from_nemo(MegatronT0Model, cfg, trainer, t5_cfg, modify_confg_fn=_modify_config) else: validate_checkpoint_loading_args(cfg.model.pretrained_checkpoint) - model = load_from_checkpoint_dir(MegatronT0Model, cfg, trainer, t5_cfg, modify_confg_fn=_modify_config) + model = load_from_checkpoint_dir(MegatronT0Model, cfg, trainer, modify_confg_fn=_modify_config) else: if cfg.model.restore_from_path: t5_cfg = MegatronT5FinetuneModel.restore_from( @@ -199,9 +202,7 @@ def main(cfg) -> None: model = load_from_nemo(MegatronT5FinetuneModel, cfg, trainer, t5_cfg, modify_confg_fn=_modify_config) else: validate_checkpoint_loading_args(cfg.model.pretrained_checkpoint) - model = load_from_checkpoint_dir( - MegatronT5FinetuneModel, cfg, trainer, t5_cfg, modify_confg_fn=_modify_config - ) + model = load_from_checkpoint_dir(MegatronT5FinetuneModel, cfg, trainer, modify_confg_fn=_modify_config) trainer.fit(model) trainer.validate(model) diff --git a/requirements/requirements_lightning.txt b/requirements/requirements_lightning.txt index 259bd1289dc7..7b7885b29cf5 100644 --- a/requirements/requirements_lightning.txt +++ b/requirements/requirements_lightning.txt @@ -1,5 +1,5 @@ pytorch-lightning>=1.7.0,<=1.7.7 -torchmetrics>=0.4.1rc0 +torchmetrics>=0.4.1rc0,<=0.10.3 transformers>=4.0.1,<=4.21.2 webdataset>=0.1.48,<=0.1.62 omegaconf>=2.1.2,<2.2 From d9e093457052f44685f3861b023c2a90a02fec18 Mon Sep 17 00:00:00 2001 From: Eric Harper Date: Mon, 5 Dec 2022 12:25:10 -0700 Subject: [PATCH 200/244] remove notebook (#5548) Signed-off-by: ericharper Signed-off-by: ericharper --- .../Non_English_Downstream_Tasks_(NER).ipynb | 899 ------------------ 1 file changed, 899 deletions(-) delete mode 100644 tutorials/nlp/Non_English_Downstream_Tasks_(NER).ipynb diff --git a/tutorials/nlp/Non_English_Downstream_Tasks_(NER).ipynb b/tutorials/nlp/Non_English_Downstream_Tasks_(NER).ipynb deleted file mode 100644 index f088f8ca4627..000000000000 --- a/tutorials/nlp/Non_English_Downstream_Tasks_(NER).ipynb +++ /dev/null @@ -1,899 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "OETcTQlcguCm" - }, - "outputs": [], - "source": [ - "BRANCH = 'r1.13.0'" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "o_0K1lsW1dj9" - }, - "outputs": [], - "source": [ - "\"\"\"\n", - "You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.\n", - "\n", - "Instructions for setting up Colab are as follows:\n", - "1. Open a new Python 3 notebook.\n", - "2. Import this notebook from GitHub (File -> Upload Notebook -> \"GITHUB\" tab -> copy/paste GitHub URL)\n", - "3. 
Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", - "4. Run this cell to set up dependencies.\n", - "\"\"\"\n", - "# If you're using Google Colab and not running locally, run this cell\n", - "\n", - "# install NeMo\n", - "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "pC0slAc0h9zN", - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "# If you're not using Colab, you might need to upgrade jupyter notebook to avoid the following error:\n", - "# 'ImportError: IProgress not found. Please update jupyter and ipywidgets.'\n", - "\n", - "! pip install ipywidgets\n", - "! jupyter nbextension enable --py widgetsnbextension\n", - "\n", - "# Please restart the kernel after running this cell" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "dzqD2WDFOIN-" - }, - "outputs": [], - "source": [ - "from nemo.collections import nlp as nemo_nlp\n", - "from nemo.utils.exp_manager import exp_manager\n", - "\n", - "import os\n", - "import wget \n", - "import torch\n", - "import pytorch_lightning as pl\n", - "from omegaconf import OmegaConf\n", - "\n", - "import zipfile\n", - "import random\n", - "from glob import glob" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "daYw_Xll2ZR9" - }, - "source": [ - "# Tutorial Overview\n", - "In this tutorial, we will show how to use a pre-trained BERT language model on a non-English downstream task. Here we are going to use Persian language and Named entity recognition (NER) task as an example. Note, most of the rest downstream tasks supported in NeMo should work similarly for other languages. \n", - "\n", - "# Task Description\n", - "NER is the task of detecting and classifying key information (entities) in text.\n", - "For example, in a sentence: `Mary lives in Santa Clara and works at NVIDIA`, we should detect that `Mary` is a person, `Santa Clara` is a location and `NVIDIA` is a company.\n", - "\n", - "In this tutorial we will be using [BERT language model](https://arxiv.org/abs/1810.04805).\n", - "\n", - "To read more about other topics and downstream task that can be done in NeMo, you can see the [NeMo's tutorial page](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/).\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ZnuziSwJ1yEB" - }, - "source": [ - "# Dataset\n", - "\n", - "In this tutorial we are going to use [Persian Arman dataset for our NER task](https://github.com/HaniehP/PersianNER).\n", - "\n", - "Arman is a hand annotated Persian corpus for NER task with 250,015 tokens and 7,682 sentences. Using [IOB encoding](https://en.wikipedia.org/wiki/Inside%E2%80%93outside%E2%80%93beginning_(tagging)), tokens are labeled with either one of the following name entities or labeled with O. \n", - "\n", - "* event = event\n", - "* fac = facility\n", - "* loc = location\n", - "* org = organization\n", - "* pers = person\n", - "* pro = product\n", - "\n", - "Each of these has a label staring with **B** that indicates it is the first token of the name entity and with **I** for others. 
\n", - "\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "qzcZ3nb_-SVT" - }, - "source": [ - "# NeMo Token Classification Data Format\n", - "\n", - "[TokenClassification Model](https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/nlp/models/token_classification/token_classification_model.py) in NeMo supports NER and other token level classification tasks, as long as the data follows the format specified below. \n", - "\n", - "Token Classification Model requires the data to be split into 2 files: \n", - "* text.txt \n", - "* labels.txt. \n", - "\n", - "Each line of the **text.txt** file contains text sequences, where words are separated with spaces, i.e.: \n", - "[WORD] [SPACE] [WORD] [SPACE] [WORD].\n", - "\n", - "The **labels.txt** file contains corresponding labels for each word in text.txt, the labels are separated with spaces, i.e.:\n", - "[LABEL] [SPACE] [LABEL] [SPACE] [LABEL].\n", - "\n", - "Example of a text.txt file:\n", - "```\n", - "دبیر شورای عالی انقلاب فرهنگی از گنجانده شدن 5 زبان خارجی جدید در برنامه درسی مدارس خبر داد.\n", - "```\n", - "Corresponding labels.txt file:\n", - "```\n", - "O B_ORG I_ORG I_ORG I_ORG O O O O O O O O O O O O O O \n", - "```" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "SL58EWkd2ZVb" - }, - "source": [ - "## Download and preprocess the data¶" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "_z2tCEIXZa90" - }, - "source": [ - "You can download the Arman dataset by cloning to the following github repository: https://github.com/HaniehP/PersianNER.\n", - "\n", - "After downloading the data, you will see a few files and folders inside a directory named PersianNER. Take ArmanPersoNERCorpus.zip and upload it to `DATA_DIR` (if running in a docker or locally) or use **files** from Google colab to upload the files.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "n8HZrDmr12_-" - }, - "outputs": [], - "source": [ - "# path to the folder with ArmanPersoNERCorpus.zip file (if running locally on in a docker)\n", - "DATA_DIR = \"PATH_TO_FOLDER_WITH_ZIP.ZIP_FILE\"\n", - "WORK_DIR = \"WORK_DIR\"\n", - "\n", - "# adding an empty subfolder for data (otherwise it can interact with existing folders in DATA_DIR)\n", - "subfolder = f\"{DATA_DIR}/non_eng_NER\"\n", - "\n", - "os.makedirs(WORK_DIR, exist_ok=True)\n", - "os.makedirs(DATA_DIR, exist_ok=True)\n", - "os.makedirs(subfolder, exist_ok=True)\n", - "\n", - "! cp $DATA_DIR/ArmanPersoNERCorpus.zip $subfolder/.\n", - "DATA_DIR = f\"{DATA_DIR}/non_eng_NER\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "k1TmF5rrdPMj" - }, - "outputs": [], - "source": [ - "if 'google.colab' in str(get_ipython):\n", - " from google.colab import files\n", - " uploaded = files.upload() " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "HTUKJOownkrF" - }, - "outputs": [], - "source": [ - "if 'google.colab' in str(get_ipython):\n", - " ! mv ArmanPersoNERCorpus.zip $DATA_DIR/." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "NhUzIeF0Yg0l" - }, - "source": [ - "Let's extract files from the zip file. It will generate three test and train files which have overlaps and are intended to be used in turn as train and test sets. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "Y01BdjPRW-7B" - }, - "outputs": [], - "source": [ - "! 
cd $DATA_DIR && unzip \"ArmanPersoNERCorpus.zip\"" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "qaDgL-sQaX2e" - }, - "source": [ - "Next, we will be putting all data into a single file and removing any repeated sentences. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "B0T4CzJvbBJ4" - }, - "outputs": [], - "source": [ - "file_all = os.path.join(DATA_DIR, \"all_data.txt\")\n", - "with open(file_all, \"w\") as f1:\n", - " for filename in glob(f\"{DATA_DIR}/test_fold*.txt\") + glob(f\"{DATA_DIR}/train_fold*.txt\"):\n", - " with open(filename, \"r\", encoding = \"ISO-8859-1\") as f2:\n", - " for line in f2:\n", - " f1.write(line)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "VzVuET8HESFB" - }, - "source": [ - "Now, you need to convert this data into NeMo compatible format before starting the training process. For this purpose, you can run [examples/nlp/token_classification/data/import_from_iob_format.py](https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/token_classification/data/import_from_iob_format.py) on your train and dev files, as follows:\n", - "\n", - "\n", - "\n", - "\n", - "```\n", - "python examples/nlp/token_classification/data/import_from_iob_format.py --data_file PATH_TO_IOB_FORMAT_DATAFILE, e.g., \"DATA_DIR/all_data.txt\"\n", - "```\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "ord_6KlkeNl8" - }, - "outputs": [], - "source": [ - "!wget https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/nlp/token_classification/data/import_from_iob_format.py" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "IfSUkxffeSpL" - }, - "outputs": [], - "source": [ - "!python import_from_iob_format.py --data_file $DATA_DIR/all_data.txt" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Aj0rXbYXbivW" - }, - "source": [ - "Now we process the data to remove potentially any repeated sentences and then split them into train and dev sets. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "CgvnTlqzbq5-" - }, - "outputs": [], - "source": [ - "sent_dict = dict()\n", - "line_removed = dict()\n", - "line_counter = 0\n", - "with open(DATA_DIR + \"/text_all_not_repeated.txt\", \"w\") as f1:\n", - " with open(DATA_DIR + \"/text_all_data.txt\", \"r\") as f2:\n", - " for line in f2:\n", - " line_counter += 1\n", - " if (not line in sent_dict):\n", - " sent_dict[line] = 1\n", - " f1.write(line)\n", - " else:\n", - " line_removed[line_counter] = 1\n", - "#labels:\n", - "line_counter = 0\n", - "with open(DATA_DIR + \"/labels_all_not_repeated.txt\", \"w\") as f1:\n", - " with open(DATA_DIR + \"/labels_all_data.txt\", \"r\") as f2:\n", - " for line in f2:\n", - " line_counter += 1\n", - " if(not line_counter in line_removed):\n", - " f1.write(line)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "0cO3crs_gXjt" - }, - "source": [ - "After preprocessing the data and removing repeated sentences, there will be 7668 total valid sentences. We will be using 85% of that as train and 15% as dev. 
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "7oHQYsMMbugP" - }, - "outputs": [], - "source": [ - "total_data = 7668\n", - "train_share = 0.85\n", - "used_lines_train = dict()\n", - "flag = 1\n", - "count = 0\n", - "while flag:\n", - " idx = random.randint(1, total_data)\n", - " if (not idx in used_lines_train):\n", - " used_lines_train[idx] = 1\n", - " count += 1\n", - " if (count/total_data > train_share):\n", - " flag = 0\n", - "\n", - "line_counter = 0\n", - "with open(DATA_DIR+ \"/text_train.txt\", \"w\") as f1:\n", - " with open(DATA_DIR + \"/text_dev.txt\", \"w\") as f2:\n", - " with open(DATA_DIR + \"/text_all_not_repeated.txt\", \"r\") as f3:\n", - " for line in f3:\n", - " line_counter += 1\n", - " if (line_counter in used_lines_train):\n", - " f1.write(line)\n", - " else:\n", - " f2.write(line)\n", - "\n", - "line_counter = 0\n", - "with open(DATA_DIR + \"/labels_train.txt\", \"w\") as f1:\n", - " with open(DATA_DIR + \"/labels_dev.txt\", \"w\") as f2:\n", - " with open(DATA_DIR + \"/labels_all_not_repeated.txt\", \"r\") as f3:\n", - " for line in f3:\n", - " line_counter += 1\n", - " if (line_counter in used_lines_train):\n", - " f1.write(line)\n", - " else:\n", - " f2.write(line)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "1Q-GWNwDbzKl" - }, - "source": [ - "Finally, we remove files that are not needed anymore." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "II20ustub5BF" - }, - "outputs": [], - "source": [ - "print(\"Removed files:\")\n", - "for filename in os.listdir(DATA_DIR):\n", - " if (filename == \"text_dev.txt\" or filename == \"text_train.txt\" or filename == \"labels_dev.txt\" or filename == \"labels_train.txt\"):\n", - " continue\n", - " print(filename)\n", - " os.remove(DATA_DIR + \"/\" + filename)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "U8Ty5_S7Ye8h" - }, - "source": [ - "Now, the data folder should contain these 4 files:" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "L8vsyh3JZH26" - }, - "source": [ - "\n", - "\n", - "* labels_dev.txt\n", - "* labels_train.txt\n", - "* text_dev.txt\n", - "* text_train.txt\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "qB0oLE4R9EhJ" - }, - "outputs": [], - "source": [ - "! ls -l $DATA_DIR" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "6UDPgadLN6SG" - }, - "outputs": [], - "source": [ - "# let's take a look at the data \n", - "print('Text:')\n", - "! head -n 5 {DATA_DIR}/text_train.txt\n", - "\n", - "print('\\nLabels:')\n", - "! head -n 5 {DATA_DIR}/labels_train.txt" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "_whKCxfTMo6Y" - }, - "source": [ - "# Model configuration\n", - "\n", - "Our Named Entity Recognition model is comprised of the pretrained [BERT](https://arxiv.org/pdf/1810.04805.pdf) model followed by a Token Classification layer.\n", - "\n", - "The model is defined in a config file which declares multiple important sections. 
They are:\n", - "- **model**: All arguments that are related to the Model - language model, token classifier, optimizer and schedulers, datasets and any other related information\n", - "\n", - "- **trainer**: Any argument to be passed to PyTorch Lightning" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "T1gA8PsJ13MJ" - }, - "outputs": [], - "source": [ - "MODEL_CONFIG = \"token_classification_config.yaml\"\n", - "# download the model's configuration file \n", - "config_dir = WORK_DIR + '/configs/'\n", - "os.makedirs(config_dir, exist_ok=True)\n", - "if not os.path.exists(config_dir + MODEL_CONFIG):\n", - " print('Downloading config file...')\n", - " wget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/examples/nlp/token_classification/conf/' + MODEL_CONFIG, config_dir)\n", - "else:\n", - " print ('config file is already exists')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "mX3KmWMvSUQw" - }, - "outputs": [], - "source": [ - "# this line will print the entire config of the model\n", - "config_path = f'{WORK_DIR}/configs/{MODEL_CONFIG}'\n", - "print(config_path)\n", - "config = OmegaConf.load(config_path)\n", - "print(OmegaConf.to_yaml(config))" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ZCgWzNBkaQLZ" - }, - "source": [ - "# Fine-tuning the model using Arman dataset\n", - "\n", - "Let's select a [`bert-base-multilingual-uncased`](https://huggingface.co/bert-base-multilingual-uncased) BERT model and fine-tune it on the Arman dataset.\n", - "\n", - "## Setting up Data within the config\n", - "\n", - "Among other things, the config file contains dictionaries called dataset, train_ds and validation_ds. These are configurations used to setup the Dataset and DataLoaders of the corresponding config.\n", - "\n", - "We assume that both training and evaluation files are in the same directory and use the default names mentioned during the data download step. 
\n", - "So, to start model training, we simply need to specify `model.dataset.data_dir`, like we are going to do below.\n", - "\n", - "Also notice that some config lines, including `model.dataset.data_dir`, have `???` in place of paths, this means that values for these fields are required to be specified by the user.\n", - "\n", - "Let us now add the data directory path to the config.\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "LQHCJN-ZaoLp" - }, - "outputs": [], - "source": [ - "# in this tutorial train and dev datasets are located in the same folder, so it is enought to add the path of the data directory to the config\n", - "config.model.dataset.data_dir = DATA_DIR\n", - "\n", - "# if you want to use the full dataset, set NUM_SAMPLES to -1\n", - "NUM_SAMPLES = 1000\n", - "config.model.train_ds.num_samples = NUM_SAMPLES\n", - "config.model.validation_ds.num_samples = NUM_SAMPLES\n", - "\n", - "# for demonstartion purposes we're running only a single epoch\n", - "config.trainer.max_epochs = 5\n", - "print(OmegaConf.to_yaml(config.model))" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "nB96-3sTc3yk" - }, - "source": [ - "## Building the PyTorch Lightning Trainer\n", - "\n", - "NeMo models are primarily PyTorch Lightning modules - and therefore are entirely compatible with the PyTorch Lightning ecosystem.\n", - "\n", - "Let's first instantiate a Trainer object" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "1tG4FzZ4Ui60" - }, - "outputs": [], - "source": [ - "print(\"Trainer config - \\n\")\n", - "print(OmegaConf.to_yaml(config.trainer))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "knF6QeQQdMrH" - }, - "outputs": [], - "source": [ - "# lets modify some trainer configs\n", - "# checks if we have GPU available and uses it\n", - "accelerator = 'gpu' if torch.cuda.is_available() else 'cpu'\n", - "config.trainer.devices = 1\n", - "config.trainer.accelerator = accelerator\n", - "\n", - "config.trainer.precision = 16 if torch.cuda.is_available() else 32\n", - "\n", - "# for mixed precision training, uncomment the line below (precision should be set to 16 and amp_level to O1):\n", - "# config.trainer.amp_level = O1\n", - "\n", - "# remove distributed training flags\n", - "config.trainer.strategy = None\n", - "\n", - "# setup max number of steps to reduce training time for demonstration purposes of this tutorial\n", - "config.trainer.max_steps = 32\n", - "\n", - "config.exp_manager.exp_dir = WORK_DIR\n", - "trainer = pl.Trainer(**config.trainer)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "8IlEMdVxdr6p" - }, - "source": [ - "## Setting up a NeMo Experiment¶\n", - "\n", - "NeMo has an experiment manager that handles logging and checkpointing for us, so let's use it:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "exp_manager(trainer, config.get(\"exp_manager\", None))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "8uztqGAmdrYt" - }, - "outputs": [], - "source": [ - "exp_dir = config.exp_manager.exp_dir\n", - "\n", - "# the exp_dir provides a path to the current experiment for easy access\n", - "exp_dir = str(exp_dir)\n", - "exp_dir" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "8tjLhUvL_o7_" - }, - "source": [ - "Before initializing the model, we might want to modify some of the model configs. 
For example, we might want to modify the pretrained BERT model:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "Xeuc2i7Y_nP5" - }, - "outputs": [], - "source": [ - "# get the list of supported BERT-like models, for the complete list of HugginFace models, see https://huggingface.co/models\n", - "print(nemo_nlp.modules.get_pretrained_lm_models_list(include_external=False))\n", - "\n", - "# specify BERT-like model, you want to use\n", - "PRETRAINED_BERT_MODEL = \"bert-base-multilingual-uncased\"" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "fzNZNAVRjDD-" - }, - "source": [ - "Now, we are ready to initialize our model. During the model initialization call, the dataset and data loaders we'll be prepared for training and evaluation.\n", - "Also, the pretrained BERT model will be downloaded, note it can take up to a few minutes depending on the size of the chosen BERT model." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "NgsGLydWo-6-" - }, - "outputs": [], - "source": [ - "model = nemo_nlp.models.TokenClassificationModel(cfg=config.model, trainer=trainer)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "kQ592Tx4pzyB" - }, - "source": [ - "## Monitoring training progress\n", - "Optionally, you can create a Tensorboard visualization to monitor training progress." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "mTJr16_pp0aS" - }, - "outputs": [], - "source": [ - "try:\n", - " from google import colab\n", - " COLAB_ENV = True\n", - "except (ImportError, ModuleNotFoundError):\n", - " COLAB_ENV = False\n", - "\n", - "# Load the TensorBoard notebook extension\n", - "if COLAB_ENV:\n", - " %load_ext tensorboard\n", - " %tensorboard --logdir {exp_dir}\n", - "else:\n", - " print(\"To use tensorboard, please use this notebook in a Google Colab environment.\")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Fj1pdEdD0Vm3" - }, - "source": [ - "See how it performs before fine-tuning" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "wo1oVGIT0aBZ" - }, - "outputs": [], - "source": [ - "# define the list of queries for inference\n", - "queries = [\n", - " 'حمید طاهایی افزود : برای اجرای این طرحها 0 میلیارد و 0 میلیون ریال اعتبار هزینه شده است . ',\n", - " 'دکتر اصغری دبیر چهارمین همایش انجمن زمین‌شناسی ایران در این زمینه گفت : از مجموع چهار صد مقاله رسیده به دبیرخانه همایش ، يك صد و هشتاد مقاله ظرف مدت دو روز در هشت سالن همایش برگزار شد . 
'\n", - "]\n", - "results = model.add_predictions(queries)\n", - "\n", - "for query, result in zip(queries, results):\n", - " print()\n", - " print(f'Query : {query}')\n", - " print(f'Result: {result.strip()}\\n')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "kyElt0Es-aSk" - }, - "outputs": [], - "source": [ - "print(\"Trainer config - \\n\")\n", - "print(OmegaConf.to_yaml(config.trainer))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "hUvnSpyjp0Dh" - }, - "outputs": [], - "source": [ - "# start model training\n", - "trainer.fit(model)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "MOrR0PeJqa0j" - }, - "source": [ - "After the training is complete, `.nemo` file that contains model's checkpoints and all associated artifacts could be found under `nemo_experiments/token_classification_model/DATE_TIME`" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "-lFo27PJ0o3W" - }, - "source": [ - "See how it gets better after:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "9fNcBnz80rLO" - }, - "outputs": [], - "source": [ - "results = model.add_predictions(queries)\n", - "\n", - "for query, result in zip(queries, results):\n", - " print()\n", - " print(f'Query : {query}')\n", - " print(f'Result: {result.strip()}\\n')" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "JxBiIKMlH8yv" - }, - "source": [ - "After training for 100 epochs, with the default config and NUM_SAMPLES = -1 (i.e. all data is used), your model performance should look similar to this: \n", - "```\n", - " label precision recall f1 support\n", - " O (label_id: 0) 99.09 99.19 99.14 32867\n", - " B-event (label_id: 1) 67.74 70.00 68.85 90\n", - " B-fac (label_id: 2) 70.89 73.68 72.26 76\n", - " B-loc (label_id: 3) 87.45 82.70 85.01 497\n", - " B-org (label_id: 4) 81.88 87.06 84.39 649\n", - " B-pers (label_id: 5) 94.93 93.36 94.14 542\n", - " B-pro (label_id: 6) 79.31 70.41 74.59 98\n", - " I-event (label_id: 7) 87.38 74.72 80.55 352\n", - " I-fac (label_id: 8) 83.08 77.14 80.00 140\n", - " I-loc (label_id: 9) 77.78 73.39 75.52 124\n", - " I-org (label_id: 10) 86.51 89.93 88.18 834\n", - " I-pers (label_id: 11) 95.30 94.35 94.82 301\n", - " I-pro (label_id: 12) 82.86 86.57 84.67 67\n", - " -------------------\n", - " micro avg 97.78 97.78 97.78 36637\n", - " macro avg 84.17 82.50 83.24 36637\n", - " weighted avg 97.78 97.78 97.77 36637\n", - "```\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "VZp9STMHQAp1" - }, - "source": [ - "**References**\n", - "\n", - "1. Devlin, Jacob, et al. \"BERT: Pre-training of deep bidirectional transformers for language understanding.\" arXiv preprint arXiv:1810.04805 (2018).\n", - "\n", - "2. Hanieh Poostchi, Ehsan Zare Borzeshi, Mohammad Abdous, and Massimo Piccardi, \"PersoNER: Persian Named-Entity Recognition,\" The 26th International Conference on Computational Linguistics (COLING 2016), pages 3381–3389, Osaka, Japan, 2016.\n", - "\n", - "3. Hanieh Poostchi, Ehsan Zare Borzeshi, and Massimo Piccardi, \"BiLSTM-CRF for Persian Named-Entity Recognition; ArmanPersoNERCorpus: the First Entity-Annotated Persian Dataset,\" The 11th Edition of the Language Resources and Evaluation Conference (LREC), Miyazaki, Japan, 7-12 May 2018, ISLRN 399-379-640-828-6, ISLRN 921-509-141-609-6." 
- ] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "collapsed_sections": [], - "name": "Non_English_Downstream_Tasks_(NER).ipynb", - "private_outputs": true, - "provenance": [] - }, - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.13" - }, - "pycharm": { - "stem_cell": { - "cell_type": "raw", - "metadata": { - "collapsed": false - }, - "source": [] - } - } - }, - "nbformat": 4, - "nbformat_minor": 1 -} From cc49cdaa6ea3392ed255967bbbea758f048a030f Mon Sep 17 00:00:00 2001 From: Sandeep Subramanian Date: Tue, 6 Dec 2022 14:37:12 -1000 Subject: [PATCH 201/244] Remove broadcast (#5558) Signed-off-by: MaximumEntropy Signed-off-by: MaximumEntropy --- .../megatron_t5_prompt_learning_model.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/nemo/collections/nlp/models/language_modeling/megatron_t5_prompt_learning_model.py b/nemo/collections/nlp/models/language_modeling/megatron_t5_prompt_learning_model.py index 3e668347ce14..643841c09f14 100644 --- a/nemo/collections/nlp/models/language_modeling/megatron_t5_prompt_learning_model.py +++ b/nemo/collections/nlp/models/language_modeling/megatron_t5_prompt_learning_model.py @@ -464,15 +464,6 @@ def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> A else: encoder_input = torch.zeros((batch_size, seq_length, self.hidden_size), dtype=self.autocast_dtype).cuda() - if self.cfg.get('pipeline_model_parallel_size', 1) > 1: - # Broadcasting encoder inputs to all ranks for now, but this is inefficent. 
- # TODO: Make Enc-Dec improvement to only boardcast encoder_ids/embeddings when needed - torch.distributed.broadcast( - encoder_input, - parallel_state.get_pipeline_model_parallel_first_rank(), - group=parallel_state.get_pipeline_model_parallel_group(), - ) - predicted_token_ids, log_probs = self.frozen_model.decode( tokens_enc=input_ids, enc_mask=enc_mask, From b0eec2bab2143a7884bdbcda5c418caf083d0cb4 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Wed, 7 Dec 2022 07:18:56 -0800 Subject: [PATCH 202/244] cleaning --- examples/tts/vits.py | 1 - .../text_to_speech/tts_tokenizers.py | 9 +-- nemo/collections/tts/models/vits.py | 66 ++++++++++--------- nemo/collections/tts/modules/vits_modules.py | 55 ++++++++-------- 4 files changed, 61 insertions(+), 70 deletions(-) diff --git a/examples/tts/vits.py b/examples/tts/vits.py index ad6018b1017e..c423486af2f4 100644 --- a/examples/tts/vits.py +++ b/examples/tts/vits.py @@ -24,7 +24,6 @@ @hydra_runner(config_path="conf", config_name="vits") def main(cfg): trainer = pl.Trainer(replace_sampler_ddp=False, **cfg.trainer) - # trainer = pl.Trainer(**cfg.trainer) exp_manager(trainer, cfg.get("exp_manager", None)) model = VitsModel(cfg=cfg.model, trainer=trainer) diff --git a/nemo/collections/common/tokenizers/text_to_speech/tts_tokenizers.py b/nemo/collections/common/tokenizers/text_to_speech/tts_tokenizers.py index c669cbea50f2..81fd5c46a648 100644 --- a/nemo/collections/common/tokenizers/text_to_speech/tts_tokenizers.py +++ b/nemo/collections/common/tokenizers/text_to_speech/tts_tokenizers.py @@ -26,7 +26,6 @@ ) from nemo.collections.common.tokenizers.text_to_speech.ipa_lexicon import get_ipa_punctuation_list -from nemo.collections.tts.helpers.helpers import intersperse from nemo.utils import logging from nemo.utils.decorators import experimental @@ -520,7 +519,6 @@ def __init__( oov=BaseTokenizer.OOV, sep='|', # To be able to distinguish between symbols add_blank_at=None, - sep_with_space=False, pad_with_space=False, text_preprocessing_func=lambda text: english_text_preprocessing(text, lower=False), ): @@ -536,7 +534,6 @@ def __init__( sep: Separation token as string. add_blank_at: Add blank to labels in the specified order ("last") or after tokens (any non None), if None then no blank in labels. - sep_with_space: Whether to separate all tokens with spaces (used for VITS) pad_with_space: Whether to pad text with spaces at the beginning and at the end or not. text_preprocessing_func: Text preprocessing function for correct execution of the tokenizer. Basically, it replaces all non-unicode characters with unicode ones. 
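# Context for the tokenizer hunk above (not part of the patch): the removed
# `sep_with_space` path interleaved a blank token between phoneme symbols —
# the standard VITS trick — via the `intersperse` helper, which now lives only
# in nemo/collections/tts/helpers/helpers.py. A minimal standalone sketch of
# that behavior; the example tokens below are illustrative.
def intersperse(lst, item):
    # Build [item, lst[0], item, lst[1], ..., item]: a list of length
    # 2 * len(lst) + 1 with `item` at every even index.
    result = [item] * (len(lst) * 2 + 1)
    result[1::2] = lst
    return result


assert intersperse(['h', 'i'], '_') == ['_', 'h', '_', 'i', '_']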
@@ -581,7 +578,6 @@ def __init__( self.punct = punct self.pad_with_space = pad_with_space - self.sep_with_space = sep_with_space self.text_preprocessing_func = text_preprocessing_func self.g2p = g2p @@ -625,10 +621,7 @@ def encode_from_g2p(self, g2p_text: List[str], raw_text: Optional[str] = None): while ps[-1] == space: ps.pop() - if self.sep_with_space: - ps = intersperse(ps, space) - - if self.pad_with_space and not self.sep_with_space: + if self.pad_with_space: ps = [space] + ps + [space] # Token index lookups diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 08b4f5be421c..f36a1c3a2ff3 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -21,6 +21,7 @@ from omegaconf import DictConfig, OmegaConf from pytorch_lightning import Trainer from pytorch_lightning.loggers import WandbLogger +from pytorch_lightning.utilities import rank_zero_only from torch.cuda.amp import autocast from torch.nn import functional as F @@ -164,36 +165,34 @@ def configure_optimizers(self): # for inference def forward(self, tokens, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=1000): - x_lengths = tokens.size(-1) - y_hat = self.net_g.infer(tokens, x_lengths, sid=sid, noise_scale=noise_scale, - length_scale=length_scale, noise_scale_w=noise_scale_w, max_len=max_len)[0] - - return y_hat + text_len = torch.tensor([tokens.size(-1)]).to(int).to(tokens.device) + audio_pred, attn, y_mask, (z, z_p, m_p, logs_p) = self.net_g.infer(tokens, text_len, sid=sid, noise_scale=noise_scale, + length_scale=length_scale, noise_scale_w=noise_scale_w, max_len=max_len) + return audio_pred, attn, y_mask, (z, z_p, m_p, logs_p) def training_step(self, batch, batch_idx): speakers = None if SpeakerID in self._train_dl.dataset.sup_data_types_set: - (y, y_lengths, x, x_lengths, speakers) = batch + (audio, audio_len, text, text_len, speakers) = batch else: - (y, y_lengths, x, x_lengths) = batch + (audio, audio_len, text, text_len) = batch - spec, spec_lengths = self.audio_to_melspec_processor(y, y_lengths, linear_spec=True) + spec, spec_lengths = self.audio_to_melspec_processor(audio, audio_len, linear_spec=True) with autocast(enabled=True): - y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = self.net_g( - x, x_lengths, spec, spec_lengths, speakers + audio_pred, l_length, attn, ids_slice, text_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = self.net_g( + text, text_len, spec, spec_lengths, speakers ) - # y_mel = slice_segments(mel, ids_slice, self._cfg.segment_size // self.cfg.n_window_stride) - y_hat = y_hat.float() + audio_pred = audio_pred.float() - y_hat_mel, _ = self.audio_to_melspec_processor(y_hat.squeeze(1), y_lengths, linear_spec=False) + audio_pred_mel, _ = self.audio_to_melspec_processor(audio_pred.squeeze(1), audio_len, linear_spec=False) - y = slice_segments(y.unsqueeze(1), ids_slice * self.cfg.n_window_stride, self._cfg.segment_size) - y_mel, _ = self.audio_to_melspec_processor(y.squeeze(1), y_lengths, linear_spec=False) + audio = slice_segments(audio.unsqueeze(1), ids_slice * self.cfg.n_window_stride, self._cfg.segment_size) + audio_mel, _ = self.audio_to_melspec_processor(audio.squeeze(1), audio_len, linear_spec=False) with autocast(enabled=True): - y_d_hat_r, y_d_hat_g, _, _ = self.net_d(y, y_hat.detach()) + y_d_hat_r, y_d_hat_g, _, _ = self.net_d(audio, audio_pred.detach()) with autocast(enabled=False): loss_disc, losses_disc_r, losses_disc_g = 
self.disc_loss(disc_real_outputs=y_d_hat_r, @@ -210,11 +209,11 @@ def training_step(self, batch, batch_idx): optim_d.step() with autocast(enabled=True): - y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = self.net_d(y, y_hat) + y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = self.net_d(audio, audio_pred) # Generator with autocast(enabled=False): loss_dur = torch.sum(l_length.float()) - loss_mel = F.l1_loss(y_mel, y_hat_mel) * self._cfg.c_mel + loss_mel = F.l1_loss(audio_mel, audio_pred_mel) * self._cfg.c_mel loss_kl = self.kl_loss(z_p=z_p, logs_q=logs_q, m_p=m_p, logs_p=logs_p, z_mask=z_mask) * self._cfg.c_kl loss_fm = self.feat_matching_loss(fmap_r=fmap_r, fmap_g=fmap_g) loss_gen, losses_gen = self.gen_loss(disc_outputs=y_d_hat_g) @@ -259,44 +258,47 @@ def training_step(self, batch, batch_idx): self.log_dict(metrics, on_step=True, sync_dist=True) def validation_step(self, batch, batch_idx): - speakers = None if self.cfg.n_speakers > 1: - (y, y_lengths, x, x_lengths, speakers) = batch + (audio, audio_len, text, text_len, speakers) = batch else: - (y, y_lengths, x, x_lengths) = batch + (audio, audio_len, text, text_len) = batch - y_hat, attn, mask, *_ = self.net_g.infer(x, x_lengths, speakers, max_len=1000) + audio_pred, attn, mask, *_ = self.net_g.infer(text, text_len, speakers, max_len=1000) - y_hat = y_hat.squeeze() - y_hat_lengths = mask.sum([1, 2]).long() * self._cfg.validation_ds.dataset.hop_length + audio_pred = audio_pred.squeeze() + audio_pred_len = mask.sum([1, 2]).long() * self._cfg.validation_ds.dataset.hop_length - mel, mel_lengths = self.audio_to_melspec_processor(y, y_lengths) - y_hat_mel, y_hat_mel_lengths = self.audio_to_melspec_processor(y_hat, y_hat_lengths) + mel, mel_lengths = self.audio_to_melspec_processor(audio, audio_len) + audio_pred_mel, audio_pred_mel_len = self.audio_to_melspec_processor(audio_pred, audio_pred_len) # plot audio once per epoch if batch_idx == 0 and isinstance(self.logger, WandbLogger) and HAVE_WANDB: logger = self.logger.experiment + # tokens = self.parse('I speak loud and clear').cuda() + # audio = self.convert_text_to_waveform(tokens=tokens) + # audio_len = torch.tensor(audio.size(-1)).unsqueeze(0).cuda() + # spec, _ = self.audio_to_melspec_processor(audio, audio_len) + specs = [] audios = [] - specs += [ wandb.Image( plot_spectrogram_to_numpy(mel[0, :, : mel_lengths[0]].data.cpu().numpy()), caption=f"val_mel_target", ), wandb.Image( - plot_spectrogram_to_numpy(y_hat_mel[0, :, : y_hat_mel_lengths[0]].data.cpu().numpy()), + plot_spectrogram_to_numpy(audio_pred_mel[0, :, : audio_pred_mel_len[0]].data.cpu().numpy()), caption=f"val_mel_predicted", ), ] audios += [ wandb.Audio( - y[0, : y_lengths[0]].data.cpu().to(torch.float).numpy(), + audio[0, : audio_len[0]].data.cpu().to(torch.float).numpy(), caption=f"val_wav_target", sample_rate=self._cfg.sample_rate, ), wandb.Audio( - y_hat[0, : y_hat_lengths[0]].data.cpu().to(torch.float).numpy(), + audio_pred[0, : audio_pred_len[0]].data.cpu().to(torch.float).numpy(), caption=f"val_wav_predicted", sample_rate=self._cfg.sample_rate, ), @@ -354,5 +356,5 @@ def list_available_models(cls) -> 'List[PretrainedModelInfo]': # TODO: List available models?? 
return list_of_models - def convert_text_to_waveform(self, *, tokens): - return self(tokens).squeeze(1) + def convert_text_to_waveform(self, *, tokens, sid=None): + return self(tokens, sid=sid)[0].squeeze(1) diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index 8ba11c512e70..d0069285c06d 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -591,8 +591,6 @@ def forward(self, x, x_lengths, g=None): x = self.pre(x) * x_mask x = self.enc(x, x_mask, g=g) stats = self.proj(x) * x_mask - # torch.manual_seed(1) - # torch.cuda.manual_seed(1) m, logs = torch.split(stats, self.out_channels, dim=1) z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask return z, m, logs, x_mask @@ -686,7 +684,6 @@ def forward(self, x): return x, fmap -# TODO: reuse from hifigan if it is possible? class DiscriminatorS(torch.nn.Module): def __init__(self, use_spectral_norm=False): super(DiscriminatorS, self).__init__() @@ -812,23 +809,23 @@ def __init__(self, if n_speakers > 1: self.emb_g = nn.Embedding(n_speakers, gin_channels) - def forward(self, x, x_lengths, y, y_lengths, sid=None): - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) + def forward(self, text, text_len, spec, spec_len, sid=None): + x, mean_prior, logscale_prior, x_mask = self.enc_p(text, text_len) if self.n_speakers > 1: g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] else: g = None - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) + z, mean_posterior, logscale_posterior, y_mask = self.enc_q(spec, spec_len, g=g) z_p = self.flow(z, y_mask, g=g) with torch.no_grad(): # negative cross-entropy - s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] - neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] + s_p_sq_r = torch.exp(-2 * logscale_prior) # [b, d, t] + neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logscale_prior, [1], keepdim=True) # [b, 1, t_s] neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] + neg_cent3 = torch.matmul(z_p.transpose(1, 2), (mean_prior * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] + neg_cent4 = torch.sum(-0.5 * (mean_prior ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) @@ -844,16 +841,17 @@ def forward(self, x, x_lengths, y, y_lengths, sid=None): l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging # expand prior - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] + mean_prior = torch.matmul(attn.squeeze(1), mean_prior.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] + logscale_prior = torch.matmul(attn.squeeze(1), logscale_prior.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - z_slice, ids_slice = rand_slice_segments(z, y_lengths, self.segment_size) - o = self.dec(z_slice, g=g) - return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) + z_slice, ids_slice = rand_slice_segments(z, spec_len, 
self.segment_size) + audio = self.dec(z_slice, g=g) + return audio, l_length, attn, ids_slice, x_mask, y_mask, \ + (z, z_p, mean_prior, logscale_prior, mean_posterior, logscale_posterior) def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 1: + x, mean_prior, logscale_prior, x_mask = self.enc_p(x, x_lengths) + if self.n_speakers > 1 and sid is not None: g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] else: g = None @@ -864,20 +862,20 @@ def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_sca logw = self.dp(x, x_mask, g=g) w = torch.exp(logw) * x_mask * length_scale w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(get_mask_from_lengths(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) + audio_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() + audio_mask = torch.unsqueeze(get_mask_from_lengths(audio_lengths, None), 1).to(x_mask.dtype) + attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(audio_mask, -1) attn = generate_path(w_ceil, attn_mask) - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] + mean_prior = torch.matmul(attn.squeeze(1), mean_prior.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] + logscale_prior = torch.matmul(attn.squeeze(1), logscale_prior.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:,:,:max_len], g=g) - return o, attn, y_mask, (z, z_p, m_p, logs_p) + z_p = mean_prior + torch.randn_like(mean_prior) * torch.exp(logscale_prior) * noise_scale + z = self.flow(z_p, audio_mask, g=g, reverse=True) + audio = self.dec((z * audio_mask)[:,:,:max_len], g=g) + return audio, attn, audio_mask, (z, z_p, mean_prior, logscale_prior) - # Can be used for emotions conversion + # Can be used for emotions def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): assert self.n_speakers > 1, "n_speakers have to be larger than 1." 
g_src = self.emb_g(sid_src).unsqueeze(-1) @@ -908,7 +906,7 @@ def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_s self.norm_layers_1 = nn.ModuleList() self.ffn_layers = nn.ModuleList() self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): + for _ in range(self.n_layers): self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) self.norm_layers_1.append(LayerNorm(hidden_channels)) self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) @@ -921,7 +919,6 @@ def forward(self, x, x_mask): y = self.attn_layers[i](x, x, attn_mask) y = self.drop(y) x = self.norm_layers_1[i](x + y) - y = self.ffn_layers[i](x, x_mask) y = self.drop(y) x = self.norm_layers_2[i](x + y) From cbac4f85b60142388babbb38b78d102d80e3030b Mon Sep 17 00:00:00 2001 From: Sandeep Subramanian Date: Wed, 7 Dec 2022 05:39:23 -1000 Subject: [PATCH 203/244] Fix all gather while writing to a file during T5 finetuning (#5561) * Gather from data parallel only instead of all ranks Signed-off-by: MaximumEntropy * Fix Signed-off-by: MaximumEntropy Signed-off-by: MaximumEntropy --- .../nlp/models/language_modeling/megatron_finetune_model.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nemo/collections/nlp/models/language_modeling/megatron_finetune_model.py b/nemo/collections/nlp/models/language_modeling/megatron_finetune_model.py index 941048304f6a..46d6455327af 100644 --- a/nemo/collections/nlp/models/language_modeling/megatron_finetune_model.py +++ b/nemo/collections/nlp/models/language_modeling/megatron_finetune_model.py @@ -427,7 +427,7 @@ def inference_epoch_end(self, outputs, mode, data_cfg): ) # Gather the outputs object from all data parallel ranks since we are using the DistributedSampler which splits data across DDP ranks. - gathered_outputs = [None for _ in range(self.world_size)] + gathered_outputs = [None for _ in range(parallel_state.get_data_parallel_world_size())] torch.distributed.all_gather_object( gathered_outputs, [ @@ -439,6 +439,7 @@ def inference_epoch_end(self, outputs, mode, data_cfg): } for x in output ], + group=parallel_state.get_data_parallel_group(), ) # Figure out what the suffix of the file should be. @@ -455,7 +456,7 @@ def inference_epoch_end(self, outputs, mode, data_cfg): # PTL models have a self.global_rank attribute and we want to write to disk only on global rank 0. if self.global_rank == 0: - for rank in range(0, self.world_size): + for rank in range(0, parallel_state.get_data_parallel_world_size()): for batch in gathered_outputs[rank]: for pred, label, input, category in zip( batch['preds'], batch['labels'], batch['inputs'], batch['categories'] From 1ff05cce8a5e37f8acc158b27ef62744e64e72f3 Mon Sep 17 00:00:00 2001 From: ericharper Date: Wed, 7 Dec 2022 10:31:17 -0700 Subject: [PATCH 204/244] update readme Signed-off-by: ericharper --- README.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.rst b/README.rst index 43bb89139045..59f171fce214 100644 --- a/README.rst +++ b/README.rst @@ -223,7 +223,7 @@ Install it manually if not using the NVIDIA PyTorch container. 
git clone https://github.com/ericharper/apex.git cd apex - git checkout nm_v1.11.0 + git checkout nm_v1.13.0 pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" --global-option="--fast_layer_norm" --global-option="--distributed_adam" --global-option="--deprecated_fused_adam" ./ Transformer Engine From fd05dd2c70047879d4df8c1643daf0fcb1c14e2c Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 7 Dec 2022 18:16:49 +0000 Subject: [PATCH 205/244] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- examples/tts/vits.py | 1 + nemo/collections/tts/helpers/helpers.py | 16 +- nemo/collections/tts/helpers/splines.py | 155 +++--- nemo/collections/tts/losses/vits_losses.py | 4 +- nemo/collections/tts/models/base.py | 3 +- nemo/collections/tts/models/vits.py | 93 ++-- .../tts/modules/monotonic_align/__init__.py | 4 +- .../tts/modules/monotonic_align/numba_core.py | 2 +- .../tts/modules/monotonic_align/setup.py | 6 +- nemo/collections/tts/modules/vits_modules.py | 500 +++++++++++------- nemo/collections/tts/torch/data.py | 110 ++-- 11 files changed, 522 insertions(+), 372 deletions(-) diff --git a/examples/tts/vits.py b/examples/tts/vits.py index c423486af2f4..e4af05f0b9e6 100644 --- a/examples/tts/vits.py +++ b/examples/tts/vits.py @@ -13,6 +13,7 @@ # limitations under the License. from pickle import FALSE + import pytorch_lightning as pl from nemo.collections.common.callbacks import LogEpochTimeCallback diff --git a/nemo/collections/tts/helpers/helpers.py b/nemo/collections/tts/helpers/helpers.py index e79cfe017d35..7a9415db2bad 100644 --- a/nemo/collections/tts/helpers/helpers.py +++ b/nemo/collections/tts/helpers/helpers.py @@ -141,6 +141,7 @@ def get_mask_from_lengths(lengths, max_len: Optional[int] = None): mask = (ids < lengths.unsqueeze(1)).bool() return mask + @jit(nopython=True) def mas(attn_map, width=1): # assumes mel x text @@ -560,6 +561,7 @@ def split_view(tensor, split_size: int, dim: int = 0): new_shape = cur_shape[:dim] + (tensor.shape[dim] // split_size, split_size) + cur_shape[dim + 1 :] return tensor.reshape(*new_shape) + def slice_segments(x, ids_str, segment_size=4): ret = torch.zeros_like(x[:, :, :segment_size]) for i in range(x.size(0)): @@ -580,11 +582,12 @@ def rand_slice_segments(x, x_lengths=None, segment_size=4): ids_str_max = x_lengths - segment_size + 1 ids_str_max = ids_str_max.to(device=x.device) ids_str = (torch.rand([b]).to(device=x.device) * ids_str_max).to(dtype=torch.long) - + ret = slice_segments(x, ids_str, segment_size) - + return ret, ids_str + def clip_grad_value_(parameters, clip_value, norm_type=2): if isinstance(parameters, torch.Tensor): parameters = [parameters] @@ -599,19 +602,22 @@ def clip_grad_value_(parameters, clip_value, norm_type=2): total_norm += param_norm.item() ** norm_type if clip_value is not None: p.grad.data.clamp_(min=-clip_value, max=clip_value) - total_norm = total_norm ** (1. 
/ norm_type) + total_norm = total_norm ** (1.0 / norm_type) return total_norm + def intersperse(lst, item): result = [item] * (len(lst) * 2 + 1) result[1::2] = lst return result + def convert_pad_shape(pad_shape): l = pad_shape[::-1] pad_shape = [item for sublist in l for item in sublist] return pad_shape + def generate_path(duration, mask): """ duration: [b, 1, t_x] @@ -624,5 +630,5 @@ def generate_path(duration, mask): path = get_mask_from_lengths(cum_duration_flat, t_y).to(mask.dtype) path = path.view(b, t_x, t_y) path = path - torch.nn.functional.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] - path = path.unsqueeze(1).transpose(2,3) * mask - return path \ No newline at end of file + path = path.unsqueeze(1).transpose(2, 3) * mask + return path diff --git a/nemo/collections/tts/helpers/splines.py b/nemo/collections/tts/helpers/splines.py index b5fe68bb3d93..a4494efa2b0e 100644 --- a/nemo/collections/tts/helpers/splines.py +++ b/nemo/collections/tts/helpers/splines.py @@ -289,58 +289,58 @@ def piecewise_quadratic_transform(x, w_tilde, v_tilde, inverse=False): inv = inv.clamp(min=torch.finfo(c.dtype).eps, max=1.0 - torch.finfo(inv.dtype).eps) return inv, None -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=1e-3, - min_bin_height=1e-3, - min_derivative=1e-3): + +def piecewise_rational_quadratic_transform( + inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + tails=None, + tail_bound=1.0, + min_bin_width=1e-3, + min_bin_height=1e-3, + min_derivative=1e-3, +): if tails is None: spline_fn = rational_quadratic_spline spline_kwargs = {} else: spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } + spline_kwargs = {'tails': tails, 'tail_bound': tail_bound} outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs + inputs=inputs, + unnormalized_widths=unnormalized_widths, + unnormalized_heights=unnormalized_heights, + unnormalized_derivatives=unnormalized_derivatives, + inverse=inverse, + min_bin_width=min_bin_width, + min_bin_height=min_bin_height, + min_derivative=min_derivative, + **spline_kwargs ) return outputs, logabsdet def searchsorted(bin_locations, inputs, eps=1e-6): bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=1e-3, - min_bin_height=1e-3, - min_derivative=1e-3): + return torch.sum(inputs[..., None] >= bin_locations, dim=-1) - 1 + + +def unconstrained_rational_quadratic_spline( + inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + tails='linear', + tail_bound=1.0, + min_bin_width=1e-3, + min_bin_height=1e-3, + min_derivative=1e-3, +): inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) outside_interval_mask = ~inside_interval_mask @@ -364,24 +364,32 @@ def unconstrained_rational_quadratic_spline(inputs, 
unnormalized_heights=unnormalized_heights[inside_interval_mask, :], unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, + left=-tail_bound, + right=tail_bound, + bottom=-tail_bound, + top=tail_bound, min_bin_width=min_bin_width, min_bin_height=min_bin_height, - min_derivative=min_derivative + min_derivative=min_derivative, ) return outputs, logabsdet -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=1e-3, - min_bin_height=1e-3, - min_derivative=1e-3): - + +def rational_quadratic_spline( + inputs, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=False, + left=0.0, + right=1.0, + bottom=0.0, + top=1.0, + min_bin_width=1e-3, + min_bin_height=1e-3, + min_derivative=1e-3, +): if torch.min(inputs) < left or torch.max(inputs) > right: raise ValueError('Input to a transform is not within its domain') @@ -431,15 +439,13 @@ def rational_quadratic_spline(inputs, input_heights = heights.gather(-1, bin_idx)[..., 0] if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) + a = (inputs - input_cumheights) * ( + input_derivatives + input_derivatives_plus_one - 2 * input_delta + ) + input_heights * (input_delta - input_derivatives) + b = input_heights * input_derivatives - (inputs - input_cumheights) * ( + input_derivatives + input_derivatives_plus_one - 2 * input_delta + ) + c = -input_delta * (inputs - input_cumheights) discriminant = b.pow(2) - 4 * a * c assert (discriminant >= 0).all() @@ -448,11 +454,14 @@ def rational_quadratic_spline(inputs, outputs = root * input_bin_widths + input_cumwidths theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) + denominator = input_delta + ( + (input_derivatives + input_derivatives_plus_one - 2 * input_delta) * theta_one_minus_theta + ) + derivative_numerator = input_delta.pow(2) * ( + input_derivatives_plus_one * root.pow(2) + + 2 * input_delta * theta_one_minus_theta + + input_derivatives * (1 - root).pow(2) + ) logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) return outputs, -logabsdet @@ -460,15 +469,17 @@ def rational_quadratic_spline(inputs, theta = (inputs - input_cumwidths) / input_bin_widths theta_one_minus_theta = theta * (1 - theta) - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) + numerator = input_heights * (input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta) + denominator = input_delta + ( + (input_derivatives + input_derivatives_plus_one - 2 * input_delta) * theta_one_minus_theta + ) outputs = input_cumheights + numerator / denominator - 
derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) + derivative_numerator = input_delta.pow(2) * ( + input_derivatives_plus_one * theta.pow(2) + + 2 * input_delta * theta_one_minus_theta + + input_derivatives * (1 - theta).pow(2) + ) logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - return outputs, logabsdet \ No newline at end of file + return outputs, logabsdet diff --git a/nemo/collections/tts/losses/vits_losses.py b/nemo/collections/tts/losses/vits_losses.py index fbae9f11ba1b..702873d1fa81 100644 --- a/nemo/collections/tts/losses/vits_losses.py +++ b/nemo/collections/tts/losses/vits_losses.py @@ -80,6 +80,7 @@ def forward(self, z_p, logs_q, m_p, logs_p, z_mask): l = kl / torch.sum(z_mask) return l + class FeatureMatchingLoss(Loss): """VITS Feature Matching Loss module""" @@ -110,6 +111,7 @@ def forward(self, fmap_r, fmap_g): return loss * 2 + class DiscriminatorLoss(Loss): """Discriminator Loss module""" @@ -180,4 +182,4 @@ def forward(self, disc_outputs): gen_losses.append(l) loss += l - return loss, gen_losses \ No newline at end of file + return loss, gen_losses diff --git a/nemo/collections/tts/models/base.py b/nemo/collections/tts/models/base.py index 3872722d1de7..8aa38de62da4 100644 --- a/nemo/collections/tts/models/base.py +++ b/nemo/collections/tts/models/base.py @@ -228,6 +228,7 @@ def list_available_models(cls) -> 'List[PretrainedModelInfo]': list_of_models.extend(subclass_models) return list_of_models + class TextToWaveform(ModelPT, ABC): """ Base class for all end-to-end TTS models that generate a waveform from text """ @@ -261,4 +262,4 @@ def list_available_models(cls) -> 'List[PretrainedModelInfo]': subclass_models = subclass.list_available_models() if subclass_models is not None and len(subclass_models) > 0: list_of_models.extend(subclass_models) - return list_of_models \ No newline at end of file + return list_of_models diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index f36a1c3a2ff3..bd5661ff56cd 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -14,6 +14,7 @@ import contextlib + import omegaconf import torch import wandb @@ -25,17 +26,8 @@ from torch.cuda.amp import autocast from torch.nn import functional as F -from nemo.collections.tts.helpers.helpers import ( - slice_segments, - clip_grad_value_, - plot_spectrogram_to_numpy, -) -from nemo.collections.tts.losses.vits_losses import ( - KlLoss, - FeatureMatchingLoss, - DiscriminatorLoss, - GeneratorLoss -) +from nemo.collections.tts.helpers.helpers import clip_grad_value_, plot_spectrogram_to_numpy, slice_segments +from nemo.collections.tts.losses.vits_losses import DiscriminatorLoss, FeatureMatchingLoss, GeneratorLoss, KlLoss from nemo.collections.tts.models.base import TextToWaveform from nemo.collections.tts.modules.vits_modules import MultiPeriodDiscriminator from nemo.collections.tts.torch.data import DistributedBucketSampler @@ -44,14 +36,13 @@ from nemo.core.optim.lr_scheduler import CosineAnnealing from nemo.utils import logging, model_utils - - HAVE_WANDB = True try: import wandb except ModuleNotFoundError: HAVE_WANDB = False + class VitsModel(TextToWaveform): def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): # Convert to Hydra 1.0 compatible DictConfig @@ -82,14 +73,16 @@ def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None): self.gen_loss = 
GeneratorLoss() self.kl_loss = KlLoss() - self.net_g = instantiate(cfg.synthesizer, + self.net_g = instantiate( + cfg.synthesizer, n_vocab=num_tokens, spec_channels=cfg.n_fft // 2 + 1, segment_size=cfg.segment_size // cfg.n_window_stride, - padding_idx=self.tokenizer_pad,) - + padding_idx=self.tokenizer_pad, + ) + self.net_d = MultiPeriodDiscriminator(cfg.use_spectral_norm) - + self.automatic_optimization = False def _setup_normalizer(self, cfg): @@ -148,14 +141,18 @@ def configure_optimizers(self): optim_g = instantiate(optim_config, params=self.net_g.parameters(),) optim_d = instantiate(optim_config, params=self.net_d.parameters(),) - + if sched_config is not None: if sched_config.name == 'ExponentialLR': scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=sched_config.lr_decay) scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=sched_config.lr_decay) elif sched_config.name == 'CosineAnnealing': - scheduler_g = CosineAnnealing(optimizer=optim_g, max_steps=sched_config.max_steps, min_lr=sched_config.min_lr,) - scheduler_d = CosineAnnealing(optimizer=optim_d, max_steps=sched_config.max_steps, min_lr=sched_config.min_lr,) + scheduler_g = CosineAnnealing( + optimizer=optim_g, max_steps=sched_config.max_steps, min_lr=sched_config.min_lr, + ) + scheduler_d = CosineAnnealing( + optimizer=optim_d, max_steps=sched_config.max_steps, min_lr=sched_config.min_lr, + ) scheduler_g_dict = {'scheduler': scheduler_g, 'interval': 'step'} scheduler_d_dict = {'scheduler': scheduler_d, 'interval': 'step'} @@ -164,10 +161,17 @@ def configure_optimizers(self): return [optim_g, optim_d] # for inference - def forward(self, tokens, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=1000): + def forward(self, tokens, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1.0, max_len=1000): text_len = torch.tensor([tokens.size(-1)]).to(int).to(tokens.device) - audio_pred, attn, y_mask, (z, z_p, m_p, logs_p) = self.net_g.infer(tokens, text_len, sid=sid, noise_scale=noise_scale, - length_scale=length_scale, noise_scale_w=noise_scale_w, max_len=max_len) + audio_pred, attn, y_mask, (z, z_p, m_p, logs_p) = self.net_g.infer( + tokens, + text_len, + sid=sid, + noise_scale=noise_scale, + length_scale=length_scale, + noise_scale_w=noise_scale_w, + max_len=max_len, + ) return audio_pred, attn, y_mask, (z, z_p, m_p, logs_p) def training_step(self, batch, batch_idx): @@ -178,7 +182,7 @@ def training_step(self, batch, batch_idx): (audio, audio_len, text, text_len) = batch spec, spec_lengths = self.audio_to_melspec_processor(audio, audio_len, linear_spec=True) - + with autocast(enabled=True): audio_pred, l_length, attn, ids_slice, text_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = self.net_g( text, text_len, spec, spec_lengths, speakers @@ -186,28 +190,29 @@ def training_step(self, batch, batch_idx): audio_pred = audio_pred.float() - audio_pred_mel, _ = self.audio_to_melspec_processor(audio_pred.squeeze(1), audio_len, linear_spec=False) - + audio_pred_mel, _ = self.audio_to_melspec_processor(audio_pred.squeeze(1), audio_len, linear_spec=False) + audio = slice_segments(audio.unsqueeze(1), ids_slice * self.cfg.n_window_stride, self._cfg.segment_size) audio_mel, _ = self.audio_to_melspec_processor(audio.squeeze(1), audio_len, linear_spec=False) - + with autocast(enabled=True): y_d_hat_r, y_d_hat_g, _, _ = self.net_d(audio, audio_pred.detach()) with autocast(enabled=False): - loss_disc, losses_disc_r, losses_disc_g = self.disc_loss(disc_real_outputs=y_d_hat_r, - 
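The training_step above drives two optimizers by hand (the model sets automatic_optimization = False and calls self.optimizers()). This is the skeleton of that discriminator-then-generator update with toy linear modules standing in for net_g and net_d; the least-squares objectives match the loss hunks that follow:

import torch
from torch import nn

g = nn.Linear(16, 32)                 # stand-in generator
d = nn.Linear(32, 1)                  # stand-in discriminator
optim_g = torch.optim.AdamW(g.parameters(), lr=2e-4)
optim_d = torch.optim.AdamW(d.parameters(), lr=2e-4)

real = torch.randn(8, 32)
fake = g(torch.randn(8, 16))

# 1) discriminator step on the detached generator output
loss_d = torch.mean((1 - d(real)) ** 2) + torch.mean(d(fake.detach()) ** 2)
optim_d.zero_grad(); loss_d.backward(); optim_d.step()

# 2) generator step, re-scoring the non-detached fake through the updated D
loss_g = torch.mean((1 - d(fake)) ** 2)
optim_g.zero_grad(); loss_g.backward(); optim_g.step()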
disc_generated_outputs=y_d_hat_g) + loss_disc, losses_disc_r, losses_disc_g = self.disc_loss( + disc_real_outputs=y_d_hat_r, disc_generated_outputs=y_d_hat_g + ) loss_disc_all = loss_disc # get optimizers optim_g, optim_d = self.optimizers() - + # train discriminator optim_d.zero_grad() self.manual_backward(loss_disc_all) norm_d = clip_grad_value_(self.net_d.parameters(), None) optim_d.step() - + with autocast(enabled=True): y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = self.net_d(audio, audio_pred) # Generator @@ -219,7 +224,6 @@ def training_step(self, batch, batch_idx): loss_gen, losses_gen = self.gen_loss(disc_outputs=y_d_hat_g) loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl - # train generator optim_g.zero_grad() self.manual_backward(loss_gen_all) @@ -229,8 +233,11 @@ def training_step(self, batch, batch_idx): schedulers = self.lr_schedulers() if schedulers is not None: sch1, sch2 = schedulers - if self.trainer.is_last_batch and isinstance(sch1, torch.optim.lr_scheduler.ExponentialLR) \ - or isinstance(sch1, CosineAnnealing): + if ( + self.trainer.is_last_batch + and isinstance(sch1, torch.optim.lr_scheduler.ExponentialLR) + or isinstance(sch1, CosineAnnealing) + ): sch1.step() sch2.step() @@ -278,12 +285,13 @@ def validation_step(self, batch, batch_idx): # audio = self.convert_text_to_waveform(tokens=tokens) # audio_len = torch.tensor(audio.size(-1)).unsqueeze(0).cuda() # spec, _ = self.audio_to_melspec_processor(audio, audio_len) - + specs = [] audios = [] specs += [ wandb.Image( - plot_spectrogram_to_numpy(mel[0, :, : mel_lengths[0]].data.cpu().numpy()), caption=f"val_mel_target", + plot_spectrogram_to_numpy(mel[0, :, : mel_lengths[0]].data.cpu().numpy()), + caption=f"val_mel_target", ), wandb.Image( plot_spectrogram_to_numpy(audio_pred_mel[0, :, : audio_pred_mel_len[0]].data.cpu().numpy()), @@ -324,7 +332,7 @@ def _loader(self, cfg): ) def train_dataloader(self): - # default used by the Trainer + # default used by the Trainer dataset = instantiate( self.cfg.train_ds.dataset, text_normalizer=self.normalizer, @@ -332,14 +340,13 @@ def train_dataloader(self): text_tokenizer=self.tokenizer, ) - train_sampler = DistributedBucketSampler( - dataset, - **self.cfg.train_ds.batch_sampler) + train_sampler = DistributedBucketSampler(dataset, **self.cfg.train_ds.batch_sampler) - dataloader = torch.utils.data.DataLoader(dataset, collate_fn=dataset.collate_fn, batch_sampler=train_sampler, - **self.cfg.train_ds.dataloader_params,) + dataloader = torch.utils.data.DataLoader( + dataset, collate_fn=dataset.collate_fn, batch_sampler=train_sampler, **self.cfg.train_ds.dataloader_params, + ) return dataloader - + def setup_training_data(self, cfg): self._train_dl = self._loader(cfg) diff --git a/nemo/collections/tts/modules/monotonic_align/__init__.py b/nemo/collections/tts/modules/monotonic_align/__init__.py index 15be08d3e566..2f8f95d68eb3 100644 --- a/nemo/collections/tts/modules/monotonic_align/__init__.py +++ b/nemo/collections/tts/modules/monotonic_align/__init__.py @@ -33,9 +33,9 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
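Note that the reformatted scheduler condition above is precedence-sensitive: `A and B or C` groups as `(A and B) or C`, so a CosineAnnealing scheduler steps on every batch while ExponentialLR steps only on an epoch's last batch. A quick check of that grouping:

A, B, C = False, True, True
assert (A and B or C) == ((A and B) or C)   # how Python actually parses it
assert (A and B or C) != (A and (B or C))   # not the other grouping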
+import numba import numpy as np import torch -import numba # from .numba_core import maximum_path_c @@ -100,4 +100,4 @@ def maximum_path_c(paths, values, t_ys, t_xs): """ b: int = paths.shape[0] for i in numba.prange(b): - maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) \ No newline at end of file + maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) diff --git a/nemo/collections/tts/modules/monotonic_align/numba_core.py b/nemo/collections/tts/modules/monotonic_align/numba_core.py index 1d23c232b572..56640e45673c 100644 --- a/nemo/collections/tts/modules/monotonic_align/numba_core.py +++ b/nemo/collections/tts/modules/monotonic_align/numba_core.py @@ -49,4 +49,4 @@ def maximum_path_c(paths, values, t_ys, t_xs): if __name__ == '__main__': - pass \ No newline at end of file + pass diff --git a/nemo/collections/tts/modules/monotonic_align/setup.py b/nemo/collections/tts/modules/monotonic_align/setup.py index 76e09159c6cd..8d7d8b843d47 100644 --- a/nemo/collections/tts/modules/monotonic_align/setup.py +++ b/nemo/collections/tts/modules/monotonic_align/setup.py @@ -35,10 +35,12 @@ # SOFTWARE. from distutils.core import setup + # from Cython.Build import cythonize import numpy setup( - name='monotonic_align', + name='monotonic_align', # ext_modules=cythonize("core.pyx"), - include_dirs=[numpy.get_include()]) + include_dirs=[numpy.get_include()], +) diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index d0069285c06d..6d12117e5749 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -38,16 +38,21 @@ import numpy as np import torch +from librosa.filters import mel as librosa_mel_fn from torch import nn -from torch.nn import Conv1d, ConvTranspose1d, Conv2d +from torch.nn import Conv1d, Conv2d, ConvTranspose1d from torch.nn import functional as F -from librosa.filters import mel as librosa_mel_fn -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm - -from nemo.collections.tts.modules.hifigan_modules import ResBlock1, ResBlock2, init_weights, get_padding +from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm + +from nemo.collections.tts.helpers.helpers import ( + convert_pad_shape, + generate_path, + get_mask_from_lengths, + rand_slice_segments, +) from nemo.collections.tts.helpers.splines import piecewise_rational_quadratic_transform +from nemo.collections.tts.modules.hifigan_modules import ResBlock1, ResBlock2, get_padding, init_weights from nemo.collections.tts.modules.monotonic_align import maximum_path -from nemo.collections.tts.helpers.helpers import convert_pad_shape, generate_path, get_mask_from_lengths, rand_slice_segments LRELU_SLOPE = 0.1 @@ -61,6 +66,7 @@ def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): acts = t_act * s_act return acts + class LayerNorm(nn.Module): def __init__(self, channels, eps=1e-5): super().__init__() @@ -91,9 +97,7 @@ def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_la self.norm_layers = nn.ModuleList() self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2)) self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) + self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) for _ in range(n_layers - 1): self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2)) 
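maximum_path_each, dispatched over the batch with numba.prange above, computes per utterance the monotonic alignment that maximizes the summed log-likelihood. A plain-NumPy sketch of that dynamic program and its backtracking pass, written to the usual monotonic-alignment-search recurrence rather than copied from the compiled kernel:

import numpy as np

def maximum_path(log_p):
    """log_p: [t_text, t_mel] log-likelihoods -> 0/1 path of the same shape."""
    t_x, t_y = log_p.shape
    dp = np.full((t_x, t_y), -np.inf)
    dp[0, 0] = log_p[0, 0]
    for y in range(1, t_y):
        for x in range(min(y + 1, t_x)):            # a frame y can use at most y+1 tokens
            prev = dp[x, y - 1]
            if x > 0:
                prev = max(prev, dp[x - 1, y - 1])  # stay on token x, or advance from x-1
            dp[x, y] = log_p[x, y] + prev
    path, x = np.zeros((t_x, t_y), dtype=np.int32), t_x - 1
    for y in range(t_y - 1, -1, -1):                # backtrack from the final frame
        path[x, y] = 1
        if x > 0 and (x == y or dp[x - 1, y - 1] > dp[x, y - 1]):
            x -= 1
    return path

print(maximum_path(np.log(np.array([[0.9, 0.6, 0.1], [0.1, 0.4, 0.9]]))))
# [[1 1 0]
#  [0 0 1]]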
self.norm_layers.append(LayerNorm(hidden_channels)) @@ -116,7 +120,7 @@ class DDSConv(nn.Module): Dialted and Depth-Separable Convolution """ - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): + def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): super().__init__() self.channels = channels self.kernel_size = kernel_size @@ -131,9 +135,9 @@ def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): for i in range(n_layers): dilation = kernel_size ** i padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, - groups=channels, dilation=dilation, padding=padding - )) + self.convs_sep.append( + nn.Conv1d(channels, channels, kernel_size, groups=channels, dilation=dilation, padding=padding) + ) self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) self.norms_1.append(LayerNorm(channels)) self.norms_2.append(LayerNorm(channels)) @@ -156,9 +160,9 @@ def forward(self, x, x_mask, g=None): class WN(torch.nn.Module): def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): super(WN, self).__init__() - assert (kernel_size % 2 == 1) + assert kernel_size % 2 == 1 self.hidden_channels = hidden_channels - self.kernel_size = kernel_size, + self.kernel_size = (kernel_size,) self.dilation_rate = dilation_rate self.n_layers = n_layers self.gin_channels = gin_channels @@ -175,8 +179,9 @@ def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_ch for i in range(n_layers): dilation = dilation_rate ** i padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, kernel_size, - dilation=dilation, padding=padding) + in_layer = torch.nn.Conv1d( + hidden_channels, 2 * hidden_channels, kernel_size, dilation=dilation, padding=padding + ) in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') self.in_layers.append(in_layer) @@ -201,21 +206,18 @@ def forward(self, x, x_mask, g=None, **kwargs): x_in = self.in_layers[i](x) if g is not None: cond_offset = i * 2 * self.hidden_channels - g_l = g[:, cond_offset:cond_offset + 2 * self.hidden_channels, :] + g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] else: g_l = torch.zeros_like(x_in) - acts = fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) + acts = fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) acts = self.drop(acts) res_skip_acts = self.res_skip_layers[i](acts) if i < self.n_layers - 1: - res_acts = res_skip_acts[:, :self.hidden_channels, :] + res_acts = res_skip_acts[:, : self.hidden_channels, :] x = (x + res_acts) * x_mask - output = output + res_skip_acts[:, self.hidden_channels:, :] + output = output + res_skip_acts[:, self.hidden_channels :, :] else: output = output + res_skip_acts return output * x_mask @@ -269,15 +271,17 @@ def forward(self, x, x_mask, reverse=False, **kwargs): class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): + def __init__( + self, + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + p_dropout=0, + gin_channels=0, + mean_only=False, + ): assert channels % 2 == 0, "channels should be divisible by 2" super().__init__() self.channels = channels @@ -289,8 +293,9 @@ def __init__(self, self.mean_only = mean_only self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = 
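fused_add_tanh_sigmoid_multiply, used inside WN above, is the WaveNet gate: add the conditioning signal, split the channels in half, and multiply a tanh half by a sigmoid half. An unfused reference version:

import torch

def gated_activation(x_in, g_l, n_channels):
    in_act = x_in + g_l                            # add global/speaker conditioning
    t_act = torch.tanh(in_act[:, :n_channels, :])
    s_act = torch.sigmoid(in_act[:, n_channels:, :])
    return t_act * s_act                           # [b, n_channels, t]

x = torch.randn(2, 8, 5)                           # conv output with 2 * n_channels maps
print(gated_activation(x, torch.zeros_like(x), 4).shape)  # torch.Size([2, 4, 5])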
WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, - gin_channels=gin_channels) + self.enc = WN( + hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels + ) self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) self.post.weight.data.zero_() self.post.bias.data.zero_() @@ -329,7 +334,7 @@ def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins self.half_channels = in_channels // 2 self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) + self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0) self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) self.proj.weight.data.zero_() self.proj.bias.data.zero_() @@ -343,19 +348,20 @@ def forward(self, x, x_mask, g=None, reverse=False): b, c, t = x0.shape h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] - unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins:2 * self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_derivatives = h[..., 2 * self.num_bins:] - + unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels) + unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(self.filter_channels) + unnormalized_derivatives = h[..., 2 * self.num_bins :] + if x1.size(0) != 0: - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) + x1, logabsdet = piecewise_rational_quadratic_transform( + x1, + unnormalized_widths, + unnormalized_heights, + unnormalized_derivatives, + inverse=reverse, + tails='linear', + tail_bound=self.tail_bound, + ) else: logdet = 0 x = torch.cat([x0, x1], 1) * x_mask @@ -369,7 +375,7 @@ def forward(self, x, x_mask, g=None, reverse=False): class StochasticDurationPredictor(nn.Module): def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0): super().__init__() - filter_channels = in_channels # it needs to be removed from future version. + filter_channels = in_channels # it needs to be removed from future version. 
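ResidualCouplingLayer above is an invertible coupling step: half the channels pass through unchanged and parameterize a shift for the other half, and the zero-initialized `post` projection makes the flow start as the identity. A schematic standalone version with mean_only behavior, so the Jacobian log-determinant is zero; the small conv stack is a toy stand-in for WN:

import torch
from torch import nn

class MeanOnlyCoupling(nn.Module):
    def __init__(self, channels, hidden):
        super().__init__()
        self.net = nn.Sequential(nn.Conv1d(channels // 2, hidden, 3, padding=1),
                                 nn.ReLU(), nn.Conv1d(hidden, channels // 2, 1))
        nn.init.zeros_(self.net[-1].weight)   # identity transform at initialization
        nn.init.zeros_(self.net[-1].bias)

    def forward(self, x, reverse=False):
        x0, x1 = x.chunk(2, dim=1)
        m = self.net(x0)                      # shift predicted from the untouched half
        x1 = x1 - m if reverse else x1 + m
        return torch.cat([x0, x1], dim=1)

x = torch.randn(2, 8, 10)
flow = MeanOnlyCoupling(8, 16)
assert torch.allclose(flow(flow(x), reverse=True), x, atol=1e-6)  # exact inverse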
self.in_channels = in_channels self.filter_channels = filter_channels self.kernel_size = kernel_size @@ -426,8 +432,8 @@ def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): z_u, z1 = torch.split(z_q, [1, 1], 1) u = torch.sigmoid(z_u) * x_mask z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2]) - logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q + logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2]) + logq = torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q ** 2)) * x_mask, [1, 2]) - logdet_tot_q logdet_tot = 0 z0, logdet = self.log_flow(z0, x_mask) @@ -436,11 +442,11 @@ def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): for flow in flows: z, logdet = flow(z, x_mask, g=x, reverse=reverse) logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot - return nll + logq # [b] + nll = torch.sum(0.5 * (math.log(2 * math.pi) + (z ** 2)) * x_mask, [1, 2]) - logdet_tot + return nll + logq # [b] else: flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow + flows = flows[:-2] + [flows[-1]] # remove a useless vflow z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale for flow in flows: z = flow(z, x_mask, g=x, reverse=reverse) @@ -460,9 +466,9 @@ def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_cha self.gin_channels = gin_channels self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) + self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2) self.norm_1 = LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) + self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2) self.norm_2 = LayerNorm(filter_channels) self.proj = nn.Conv1d(filter_channels, 1, 1) @@ -487,16 +493,18 @@ def forward(self, x, x_mask, g=None): class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - padding_idx): + def __init__( + self, + n_vocab, + out_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + padding_idx, + ): super().__init__() self.n_vocab = n_vocab self.out_channels = out_channels @@ -508,20 +516,14 @@ def __init__(self, self.p_dropout = p_dropout self.emb = nn.Embedding(n_vocab, hidden_channels, padding_idx=padding_idx) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) + nn.init.normal_(self.emb.weight, 0.0, hidden_channels ** -0.5) - self.encoder = Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1) + self.encoder = Encoder(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout) + self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) def forward(self, x, x_lengths): - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - x = torch.transpose(x, 1, -1) # [b, h, t] + x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] + x = torch.transpose(x, 1, -1) # [b, h, t] x_mask = torch.unsqueeze(get_mask_from_lengths(x_lengths, x.size(2)), 1).to(x.dtype) x = 
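The reverse=False branch above trains the stochastic duration predictor by maximum likelihood: map the durations through the flows, accumulate log-determinants, and evaluate a standard-normal NLL on the masked result. The final line of that computation, isolated:

import math, torch

def flow_nll(z, logdet_tot, mask):
    # -log N(z; 0, I) over valid positions, corrected by the flow's log|det J|
    return torch.sum(0.5 * (math.log(2 * math.pi) + z ** 2) * mask, [1, 2]) - logdet_tot

z, mask = torch.randn(4, 2, 7), torch.ones(4, 1, 7)
print(flow_nll(z, torch.zeros(4), mask).shape)  # torch.Size([4]), one NLL per utterance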
self.encoder(x * x_mask, x_mask) @@ -532,14 +534,7 @@ def forward(self, x, x_lengths): class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): + def __init__(self, channels, hidden_channels, kernel_size, dilation_rate, n_layers, n_flows=4, gin_channels=0): super().__init__() self.channels = channels self.hidden_channels = hidden_channels @@ -551,7 +546,17 @@ def __init__(self, self.flows = nn.ModuleList() for i in range(n_flows): - self.flows.append(ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) + self.flows.append( + ResidualCouplingLayer( + channels, + hidden_channels, + kernel_size, + dilation_rate, + n_layers, + gin_channels=gin_channels, + mean_only=True, + ) + ) self.flows.append(Flip()) def forward(self, x, x_mask, g=None, reverse=False): @@ -565,14 +570,9 @@ def forward(self, x, x_mask, g=None, reverse=False): class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): + def __init__( + self, in_channels, out_channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0 + ): super().__init__() self.in_channels = in_channels self.out_channels = out_channels @@ -595,8 +595,19 @@ def forward(self, x, x_lengths, g=None): z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask return z, m, logs, x_mask + class Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): + def __init__( + self, + initial_channel, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=0, + ): super(Generator, self).__init__() self.num_kernels = len(resblock_kernel_sizes) self.num_upsamples = len(upsample_rates) @@ -605,13 +616,21 @@ def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_di self.ups = nn.ModuleList() for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), - k, u, padding=(k-u)//2))) + self.ups.append( + weight_norm( + ConvTranspose1d( + upsample_initial_channel // (2 ** i), + upsample_initial_channel // (2 ** (i + 1)), + k, + u, + padding=(k - u) // 2, + ) + ) + ) self.resblocks = nn.ModuleList() for i in range(len(self.ups)): - ch = upsample_initial_channel//(2**(i+1)) + ch = upsample_initial_channel // (2 ** (i + 1)) for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): self.resblocks.append(resblock(ch, k, d)) @@ -631,7 +650,7 @@ def forward(self, x, g=None): x = self.ups[i](x) xs = torch.zeros(x.shape, dtype=x.dtype, device=x.device) for j in range(self.num_kernels): - xs += self.resblocks[i*self.num_kernels+j](x) + xs += self.resblocks[i * self.num_kernels + j](x) x = xs / self.num_kernels x = F.leaky_relu(x) x = self.conv_post(x) @@ -646,19 +665,22 @@ def remove_weight_norm(self): for l in self.resblocks: l.remove_weight_norm() + class DiscriminatorP(torch.nn.Module): def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): super(DiscriminatorP, self).__init__() self.period = period self.use_spectral_norm = use_spectral_norm 
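PosteriorEncoder above samples its latent with the reparameterization trick, z = m + eps * exp(logs), masked to valid frames, which keeps the sampling step differentiable with respect to m and logs:

import torch

def sample_posterior(m, logs, mask):
    eps = torch.randn_like(m)
    return (m + eps * torch.exp(logs)) * mask   # gradients flow into m and logs

m, logs = torch.zeros(1, 4, 6), torch.zeros(1, 4, 6)
z = sample_posterior(m, logs, torch.ones(1, 1, 6))
print(z.shape)  # torch.Size([1, 4, 6]); unit-variance noise around m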
norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) + self.convs = nn.ModuleList( + [ + norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), + ] + ) self.dropout = nn.Dropout(0.3) self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) @@ -667,7 +689,7 @@ def forward(self, x): # 1d to 2d b, c, t = x.shape - if t % self.period != 0: # pad first + if t % self.period != 0: # pad first n_pad = self.period - (t % self.period) x = F.pad(x, (0, n_pad), "reflect") t = t + n_pad @@ -684,18 +706,21 @@ def forward(self, x): return x, fmap + class DiscriminatorS(torch.nn.Module): def __init__(self, use_spectral_norm=False): super(DiscriminatorS, self).__init__() norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) + self.convs = nn.ModuleList( + [ + norm_f(Conv1d(1, 16, 15, 1, padding=7)), + norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), + norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), + norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), + norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), + norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), + ] + ) self.dropout = nn.Dropout(0.3) self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) @@ -712,10 +737,11 @@ def forward(self, x): return x, fmap + class MultiPeriodDiscriminator(torch.nn.Module): def __init__(self, use_spectral_norm=False): super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] + periods = [2, 3, 5, 7, 11] discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] @@ -742,28 +768,30 @@ class SynthesizerTrn(nn.Module): Synthesizer for Training """ - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - padding_idx, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - **kwargs): + def __init__( + self, + n_vocab, + spec_channels, + segment_size, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + 
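DiscriminatorP above turns the waveform into a 2D map before its (kernel_size, 1) convolutions: reflect-pad the signal to a multiple of the period, then fold time into [frames, period] columns so period-aligned samples line up. The reshape in isolation:

import torch
import torch.nn.functional as F

def to_period_2d(x, period):
    b, c, t = x.shape
    if t % period != 0:                       # pad first, as in the forward above
        n_pad = period - (t % period)
        x = F.pad(x, (0, n_pad), "reflect")
        t = t + n_pad
    return x.view(b, c, t // period, period)

x = torch.randn(1, 1, 22051)
print(to_period_2d(x, 3).shape)  # torch.Size([1, 1, 7351, 3])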
n_layers, + kernel_size, + p_dropout, + padding_idx, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + n_speakers=0, + gin_channels=0, + use_sdp=True, + **kwargs + ): super().__init__() self.n_vocab = n_vocab @@ -788,17 +816,30 @@ def __init__(self, self.use_sdp = use_sdp - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - padding_idx) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) + self.enc_p = TextEncoder( + n_vocab, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + padding_idx, + ) + self.dec = Generator( + inter_channels, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=gin_channels, + ) + self.enc_q = PosteriorEncoder( + spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels + ) self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) if use_sdp: @@ -812,7 +853,7 @@ def __init__(self, def forward(self, text, text_len, spec, spec_len, sid=None): x, mean_prior, logscale_prior, x_mask = self.enc_p(text, text_len) if self.n_speakers > 1: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] + g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] else: g = None @@ -821,11 +862,15 @@ def forward(self, text, text_len, spec, spec_len, sid=None): with torch.no_grad(): # negative cross-entropy - s_p_sq_r = torch.exp(-2 * logscale_prior) # [b, d, t] - neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logscale_prior, [1], keepdim=True) # [b, 1, t_s] - neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul(z_p.transpose(1, 2), (mean_prior * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = torch.sum(-0.5 * (mean_prior ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] + s_p_sq_r = torch.exp(-2 * logscale_prior) # [b, d, t] + neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logscale_prior, [1], keepdim=True) # [b, 1, t_s] + neg_cent2 = torch.matmul( + -0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r + ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] + neg_cent3 = torch.matmul( + z_p.transpose(1, 2), (mean_prior * s_p_sq_r) + ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] + neg_cent4 = torch.sum(-0.5 * (mean_prior ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) @@ -838,21 +883,32 @@ def forward(self, text, text_len, spec, spec_len, sid=None): else: logw_ = torch.log(w + 1e-6) * x_mask logw = self.dp(x, x_mask, g=g) - l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging + l_length = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(x_mask) # for averaging # expand prior - mean_prior = torch.matmul(attn.squeeze(1), mean_prior.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logscale_prior = torch.matmul(attn.squeeze(1), logscale_prior.transpose(1, 2)).transpose(1, 2) # 
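The four neg_cent terms in the forward pass above assemble log N(z_p; m_p, e^{2 logs_p}) for every (frame, token) pair through batched matmuls, never materializing a 4D tensor. A small brute-force check of that expansion:

import math, torch

b, d, t_s, t_t = 1, 3, 4, 5
z_p, m_p, logs_p = torch.randn(b, d, t_t), torch.randn(b, d, t_s), torch.randn(b, d, t_s)

s_p_sq_r = torch.exp(-2 * logs_p)
neg_cent = (
    torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True)
    + torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r)
    + torch.matmul(z_p.transpose(1, 2), m_p * s_p_sq_r)
    + torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True)
)
# brute force: log-prob of each frame under each token's diagonal Gaussian
ref = torch.stack([
    torch.sum(-0.5 * math.log(2 * math.pi) - logs_p[0, :, j]
              - 0.5 * (z_p[0, :, i] - m_p[0, :, j]) ** 2 * s_p_sq_r[0, :, j])
    for i in range(t_t) for j in range(t_s)
]).view(1, t_t, t_s)
assert torch.allclose(neg_cent, ref, atol=1e-5)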
[b, t', t], [b, t, d] -> [b, d, t'] + mean_prior = torch.matmul(attn.squeeze(1), mean_prior.transpose(1, 2)).transpose( + 1, 2 + ) # [b, t', t], [b, t, d] -> [b, d, t'] + logscale_prior = torch.matmul(attn.squeeze(1), logscale_prior.transpose(1, 2)).transpose( + 1, 2 + ) # [b, t', t], [b, t, d] -> [b, d, t'] z_slice, ids_slice = rand_slice_segments(z, spec_len, self.segment_size) audio = self.dec(z_slice, g=g) - return audio, l_length, attn, ids_slice, x_mask, y_mask, \ - (z, z_p, mean_prior, logscale_prior, mean_posterior, logscale_posterior) - - def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): + return ( + audio, + l_length, + attn, + ids_slice, + x_mask, + y_mask, + (z, z_p, mean_prior, logscale_prior, mean_posterior, logscale_posterior), + ) + + def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1.0, max_len=None): x, mean_prior, logscale_prior, x_mask = self.enc_p(x, x_lengths) if self.n_speakers > 1 and sid is not None: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] + g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] else: g = None @@ -867,12 +923,16 @@ def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_sca attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(audio_mask, -1) attn = generate_path(w_ceil, attn_mask) - mean_prior = torch.matmul(attn.squeeze(1), mean_prior.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logscale_prior = torch.matmul(attn.squeeze(1), logscale_prior.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] + mean_prior = torch.matmul(attn.squeeze(1), mean_prior.transpose(1, 2)).transpose( + 1, 2 + ) # [b, t', t], [b, t, d] -> [b, d, t'] + logscale_prior = torch.matmul(attn.squeeze(1), logscale_prior.transpose(1, 2)).transpose( + 1, 2 + ) # [b, t', t], [b, t, d] -> [b, d, t'] z_p = mean_prior + torch.randn_like(mean_prior) * torch.exp(logscale_prior) * noise_scale z = self.flow(z_p, audio_mask, g=g, reverse=True) - audio = self.dec((z * audio_mask)[:,:,:max_len], g=g) + audio = self.dec((z * audio_mask)[:, :, :max_len], g=g) return audio, attn, audio_mask, (z, z_p, mean_prior, logscale_prior) # Can be used for emotions @@ -891,7 +951,17 @@ def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): # Attentions # ############## class Encoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): + def __init__( + self, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size=1, + p_dropout=0.0, + window_size=4, + **kwargs + ): super().__init__() self.hidden_channels = hidden_channels self.filter_channels = filter_channels @@ -907,9 +977,15 @@ def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_s self.ffn_layers = nn.ModuleList() self.norm_layers_2 = nn.ModuleList() for _ in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) + self.attn_layers.append( + MultiHeadAttention( + hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size + ) + ) self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) + self.ffn_layers.append( + FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout) + ) 
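At inference above, predicted durations are ceiled and expanded into a hard alignment with generate_path. The assumed semantics — token i owns the frame block [cumsum[i-1], cumsum[i]) — can be sketched directly; this helper is a simplified illustration, not the NeMo implementation:

import torch

def durations_to_path(durations):         # [t_text] ints -> [t_text, t_frames]
    cum = torch.cumsum(durations, 0)
    path = torch.zeros(durations.numel(), int(cum[-1]))
    start = 0
    for i, end in enumerate(cum.tolist()):
        path[i, start:end] = 1.0           # token i covers its contiguous frame block
        start = end
    return path

print(durations_to_path(torch.tensor([2, 1, 3])))
# tensor([[1., 1., 0., 0., 0., 0.],
#         [0., 0., 1., 0., 0., 0.],
#         [0., 0., 0., 1., 1., 1.]])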
self.norm_layers_2.append(LayerNorm(hidden_channels)) def forward(self, x, x_mask): @@ -927,7 +1003,18 @@ def forward(self, x, x_mask): class Decoder(nn.Module): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): + def __init__( + self, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size=1, + p_dropout=0.0, + proximal_bias=False, + proximal_init=True, + **kwargs + ): super().__init__() self.hidden_channels = hidden_channels self.filter_channels = filter_channels @@ -946,11 +1033,24 @@ def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_s self.ffn_layers = nn.ModuleList() self.norm_layers_2 = nn.ModuleList() for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) + self.self_attn_layers.append( + MultiHeadAttention( + hidden_channels, + hidden_channels, + n_heads, + p_dropout=p_dropout, + proximal_bias=proximal_bias, + proximal_init=proximal_init, + ) + ) self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) + self.encdec_attn_layers.append( + MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout) + ) self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) + self.ffn_layers.append( + FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True) + ) self.norm_layers_2.append(LayerNorm(hidden_channels)) def forward(self, x, x_mask, h, h_mask): @@ -958,7 +1058,12 @@ def forward(self, x, x_mask, h, h_mask): x: decoder input h: encoder output """ - self_attn_mask = torch.tril(torch.ones(x_mask.size(2), x_mask.size(2))).unsqueeze(0).unsqueeze(0).to(device=x.device, dtype=x.dtype) + self_attn_mask = ( + torch.tril(torch.ones(x_mask.size(2), x_mask.size(2))) + .unsqueeze(0) + .unsqueeze(0) + .to(device=x.device, dtype=x.dtype) + ) encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) x = x * x_mask for i in range(self.n_layers): @@ -978,7 +1083,18 @@ def forward(self, x, x_mask, h, h_mask): class MultiHeadAttention(nn.Module): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): + def __init__( + self, + channels, + out_channels, + n_heads, + p_dropout=0.0, + window_size=None, + heads_share=True, + block_length=None, + proximal_bias=False, + proximal_init=False, + ): super().__init__() assert channels % n_heads == 0 @@ -1002,7 +1118,7 @@ def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=No if window_size is not None: n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 + rel_stddev = self.k_channels ** -0.5 self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) @@ -1035,7 +1151,7 @@ def attention(self, query, key, value, mask=None): if self.window_size is not None: assert t_s == t_t, "Relative attention is only available for self-attention." 
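The Decoder above builds its causal mask from a lower-triangular matrix; positions after step i are filled with -1e4 before the softmax, so each step attends only to itself and the past:

import torch

t = 4
mask = torch.tril(torch.ones(t, t)).unsqueeze(0).unsqueeze(0)   # [1, 1, t, t]
scores = torch.randn(1, 1, t, t).masked_fill(mask == 0, -1e4)
attn = torch.softmax(scores, dim=-1)
print(attn[0, 0])   # upper triangle is ~0: row i attends only to positions <= i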
key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) + rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings) scores_local = self._relative_position_to_absolute_position(rel_logits) scores = scores + scores_local if self.proximal_bias: @@ -1047,14 +1163,14 @@ def attention(self, query, key, value, mask=None): assert t_s == t_t, "Local attention is only available for self-attention." block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) scores = scores.masked_fill(block_mask == 0, -1e4) - p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] + p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] p_attn = self.drop(p_attn) output = torch.matmul(p_attn, value) if self.window_size is not None: relative_weights = self._absolute_position_to_relative_position(p_attn) value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] + output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] return output, p_attn def _matmul_with_relative_values(self, x, y): @@ -1082,11 +1198,11 @@ def _get_relative_embeddings(self, relative_embeddings, length): slice_end_position = slice_start_position + 2 * length - 1 if pad_length > 0: padded_relative_embeddings = F.pad( - relative_embeddings, - convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]])) + relative_embeddings, convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]) + ) else: padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] + used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position] return used_relative_embeddings def _relative_position_to_absolute_position(self, x): @@ -1096,14 +1212,14 @@ def _relative_position_to_absolute_position(self, x): """ batch, heads, length, _ = x.size() # Concat columns of pad to shift from relative to absolute indexing. - x = F.pad(x, convert_pad_shape([[0,0],[0,0],[0,0],[0,1]])) + x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) # Concat extra elements so to add up to shape (len+1, 2*len-1). x_flat = x.view([batch, heads, length * 2 * length]) - x_flat = F.pad(x_flat, convert_pad_shape([[0,0],[0,0],[0,length-1]])) + x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])) # Reshape and slice out the padded elements. 
- x_final = x_flat.view([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] + x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1 :] return x_final def _absolute_position_to_relative_position(self, x): @@ -1113,11 +1229,11 @@ def _absolute_position_to_relative_position(self, x): """ batch, heads, length, _ = x.size() # padd along column - x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]])) - x_flat = x.view([batch, heads, length**2 + length*(length -1)]) + x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])) + x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)]) # add 0's in the beginning that will skew the elements after reshape x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]])) - x_final = x_flat.view([batch, heads, length, 2*length])[:,:,:,1:] + x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] return x_final def _attention_bias_proximal(self, length): @@ -1133,7 +1249,9 @@ def _attention_bias_proximal(self, length): class FFN(nn.Module): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): + def __init__( + self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0.0, activation=None, causal=False + ): super().__init__() self.in_channels = in_channels self.out_channels = out_channels @@ -1178,4 +1296,4 @@ def _same_padding(self, x): pad_r = self.kernel_size // 2 padding = [[0, 0], [0, 0], [pad_l, pad_r]] x = F.pad(x, convert_pad_shape(padding)) - return x \ No newline at end of file + return x diff --git a/nemo/collections/tts/torch/data.py b/nemo/collections/tts/torch/data.py index 9e257c8d7d2e..003b698b531d 100644 --- a/nemo/collections/tts/torch/data.py +++ b/nemo/collections/tts/torch/data.py @@ -207,7 +207,7 @@ def __init__( if isinstance(manifest_filepath, str): manifest_filepath = [manifest_filepath] self.manifest_filepath = manifest_filepath - self.lengths = [] # Needed for BucketSampling + self.lengths = [] # Needed for BucketSampling data = [] total_duration = 0 @@ -241,7 +241,7 @@ def __init__( data.append(file_info) self.lengths.append(os.path.getsize(item["audio_filepath"]) // (2 * hop_length)) - + if file_info["duration"] is None: logging.info( "Not all audio files have duration information. Duration logging will be disabled." @@ -984,6 +984,7 @@ def __getitem__(self, index): def __len__(self): return len(self.data) + class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler): """ Maintain similar input lengths in a batch. @@ -993,6 +994,7 @@ class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler): It removes samples which are not included in the boundaries. Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded. 
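_relative_position_to_absolute_position, reformatted above, is the pad-and-reshape "skew" trick: two pads and a view convert [b, h, L, 2L-1] relative-position logits into [b, h, L, L] absolute indexing with no gather ops. A shape-level sketch of the same steps:

import torch
import torch.nn.functional as F

def rel_to_abs(x):  # x: [b, h, L, 2L-1]
    b, h, L, _ = x.shape
    x = F.pad(x, (0, 1))                                 # -> [b, h, L, 2L]
    x_flat = x.view(b, h, L * 2 * L)
    x_flat = F.pad(x_flat, (0, L - 1))                   # length 2L^2 + L - 1
    return x_flat.view(b, h, L + 1, 2 * L - 1)[:, :, :L, L - 1:]

print(rel_to_abs(torch.randn(1, 2, 5, 9)).shape)  # torch.Size([1, 2, 5, 5])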
""" + def __init__(self, dataset, batch_size, boundaries, num_replicas=None, rank=None, shuffle=True): super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle) self.lengths = dataset.lengths @@ -1014,7 +1016,7 @@ def _create_buckets(self): for i in range(len(buckets) - 1, 0, -1): if len(buckets[i]) == 0: buckets.pop(i) - self.boundaries.pop(i+1) + self.boundaries.pop(i + 1) num_samples_per_bucket = [] for i in range(len(buckets)): @@ -1025,58 +1027,58 @@ def _create_buckets(self): return buckets, num_samples_per_bucket def __iter__(self): - # deterministically shuffle based on epoch - g = torch.Generator() - g.manual_seed(self.epoch) - indices = [] - if self.shuffle: - for bucket in self.buckets: - indices.append(torch.randperm(len(bucket), generator=g).tolist()) - else: - for bucket in self.buckets: - indices.append(list(range(len(bucket)))) - - batches = [] - for i in range(len(self.buckets)): - bucket = self.buckets[i] - len_bucket = len(bucket) - ids_bucket = indices[i] - num_samples_bucket = self.num_samples_per_bucket[i] - - # add extra samples to make it evenly divisible - rem = num_samples_bucket - len_bucket - ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[:(rem % len_bucket)] - - # subsample - ids_bucket = ids_bucket[self.rank::self.num_replicas] - - # batching - for j in range(len(ids_bucket) // self.batch_size): - batch = [bucket[idx] for idx in ids_bucket[j*self.batch_size:(j+1)*self.batch_size]] - batches.append(batch) - - if self.shuffle: - batch_ids = torch.randperm(len(batches), generator=g).tolist() - batches = [batches[i] for i in batch_ids] - self.batches = batches - - assert len(self.batches) * self.batch_size == self.num_samples - return iter(self.batches) + # deterministically shuffle based on epoch + g = torch.Generator() + g.manual_seed(self.epoch) + indices = [] + if self.shuffle: + for bucket in self.buckets: + indices.append(torch.randperm(len(bucket), generator=g).tolist()) + else: + for bucket in self.buckets: + indices.append(list(range(len(bucket)))) + + batches = [] + for i in range(len(self.buckets)): + bucket = self.buckets[i] + len_bucket = len(bucket) + ids_bucket = indices[i] + num_samples_bucket = self.num_samples_per_bucket[i] + + # add extra samples to make it evenly divisible + rem = num_samples_bucket - len_bucket + ids_bucket = ids_bucket + ids_bucket * (rem // len_bucket) + ids_bucket[: (rem % len_bucket)] + + # subsample + ids_bucket = ids_bucket[self.rank :: self.num_replicas] + + # batching + for j in range(len(ids_bucket) // self.batch_size): + batch = [bucket[idx] for idx in ids_bucket[j * self.batch_size : (j + 1) * self.batch_size]] + batches.append(batch) + + if self.shuffle: + batch_ids = torch.randperm(len(batches), generator=g).tolist() + batches = [batches[i] for i in batch_ids] + self.batches = batches + + assert len(self.batches) * self.batch_size == self.num_samples + return iter(self.batches) def _bisect(self, x, lo=0, hi=None): - if hi is None: - hi = len(self.boundaries) - 1 - - if hi > lo: - mid = (hi + lo) // 2 - if self.boundaries[mid] < x and x <= self.boundaries[mid+1]: - return mid - elif x <= self.boundaries[mid]: - return self._bisect(x, lo, mid) - else: - return self._bisect(x, mid + 1, hi) - else: - return -1 + if hi is None: + hi = len(self.boundaries) - 1 + + if hi > lo: + mid = (hi + lo) // 2 + if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]: + return mid + elif x <= self.boundaries[mid]: + return self._bisect(x, lo, mid) + else: + return 
self._bisect(x, mid + 1, hi) + else: + return -1 def __len__(self): return self.num_samples // self.batch_size @@ -1089,4 +1091,4 @@ def set_epoch(self, epoch: int) -> None: Args: epoch (int): Epoch number. """ - self.epoch = epoch \ No newline at end of file + self.epoch = epoch From c299066bb34b68237ce9588e3436cf8ac437f596 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Wed, 7 Dec 2022 10:41:45 -0800 Subject: [PATCH 206/244] added copyright --- .../tts/modules/monotonic_align/numba_core.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/nemo/collections/tts/modules/monotonic_align/numba_core.py b/nemo/collections/tts/modules/monotonic_align/numba_core.py index 1d23c232b572..83b5a03cd1b2 100644 --- a/nemo/collections/tts/modules/monotonic_align/numba_core.py +++ b/nemo/collections/tts/modules/monotonic_align/numba_core.py @@ -1,3 +1,18 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + import numba From 258dd679c4a1404fde3884e37fda0a85242045cb Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Wed, 7 Dec 2022 10:43:02 -0800 Subject: [PATCH 207/244] fixed imports --- nemo/collections/tts/models/vits.py | 1 - nemo/collections/tts/modules/vits_modules.py | 3 --- 2 files changed, 4 deletions(-) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index f36a1c3a2ff3..9b43845af552 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -21,7 +21,6 @@ from omegaconf import DictConfig, OmegaConf from pytorch_lightning import Trainer from pytorch_lightning.loggers import WandbLogger -from pytorch_lightning.utilities import rank_zero_only from torch.cuda.amp import autocast from torch.nn import functional as F diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index d0069285c06d..312922c151ab 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -35,13 +35,10 @@ # SOFTWARE. 
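The recursive _bisect above returns the index of the bucket whose (boundaries[i], boundaries[i+1]] interval contains a length, or -1 when it falls outside every interval. Python's bisect module gives the same answer in one call, which is handy for sanity-checking:

import bisect

boundaries = [32, 300, 400, 500]
for length in (100, 300, 450, 600):
    i = bisect.bisect_left(boundaries, length) - 1   # boundaries[i] < length <= boundaries[i+1]
    print(length, i if 0 <= i < len(boundaries) - 1 else -1)
# 100 -> 0, 300 -> 0, 450 -> 2, 600 -> -1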
import math - -import numpy as np import torch from torch import nn from torch.nn import Conv1d, ConvTranspose1d, Conv2d from torch.nn import functional as F -from librosa.filters import mel as librosa_mel_fn from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm from nemo.collections.tts.modules.hifigan_modules import ResBlock1, ResBlock2, init_weights, get_padding From 833a522a4bc75da25771fe489a1c9efb19cb14b5 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 7 Dec 2022 18:47:15 +0000 Subject: [PATCH 208/244] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- nemo/collections/tts/modules/vits_modules.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index ae5e48ff4c30..78f51f0148e4 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -35,6 +35,7 @@ # SOFTWARE. import math + import torch from librosa.filters import mel as librosa_mel_fn from torch import nn From dae9c4f57fd0fb80c3e54753666dbf91f770c58b Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Wed, 7 Dec 2022 11:53:17 -0800 Subject: [PATCH 209/244] cleaning --- examples/tts/conf/vits.yaml | 1 - examples/tts/conf/vits_44100.yaml | 1 - examples/tts/vits.py | 2 - nemo/collections/tts/models/vits.py | 3 +- nemo/collections/tts/modules/vits_modules.py | 1 - nemo/collections/tts/torch/data.py | 46 +++++++++++++++++--- 6 files changed, 41 insertions(+), 13 deletions(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index 2ba641ed7fac..276aa04d3f47 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -65,7 +65,6 @@ model: punct: true apostrophe: true pad_with_space: false - sep_with_space: false g2p: _target_: nemo_text_processing.g2p.modules.IPAG2P phoneme_dict: ${phoneme_dict_path} diff --git a/examples/tts/conf/vits_44100.yaml b/examples/tts/conf/vits_44100.yaml index 2cdb727949ac..827696c9c743 100644 --- a/examples/tts/conf/vits_44100.yaml +++ b/examples/tts/conf/vits_44100.yaml @@ -62,7 +62,6 @@ model: punct: true apostrophe: true pad_with_space: false - sep_with_space: false g2p: _target_: nemo_text_processing.g2p.modules.IPAG2P phoneme_dict: ${phoneme_dict_path} diff --git a/examples/tts/vits.py b/examples/tts/vits.py index e4af05f0b9e6..ac966900ba47 100644 --- a/examples/tts/vits.py +++ b/examples/tts/vits.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from pickle import FALSE - import pytorch_lightning as pl from nemo.collections.common.callbacks import LogEpochTimeCallback diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index d4ed8f1a5de2..b6d50cd16756 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -264,12 +264,13 @@ def training_step(self, batch, batch_idx): self.log_dict(metrics, on_step=True, sync_dist=True) def validation_step(self, batch, batch_idx): + speakers = None if self.cfg.n_speakers > 1: (audio, audio_len, text, text_len, speakers) = batch else: (audio, audio_len, text, text_len) = batch - audio_pred, attn, mask, *_ = self.net_g.infer(text, text_len, speakers, max_len=1000) + audio_pred, _, mask, *_ = self.net_g.infer(text, text_len, speakers, max_len=1000) audio_pred = audio_pred.squeeze() audio_pred_len = mask.sum([1, 2]).long() * self._cfg.validation_ds.dataset.hop_length diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index ae5e48ff4c30..836950ebba0a 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -36,7 +36,6 @@ import math import torch -from librosa.filters import mel as librosa_mel_fn from torch import nn from torch.nn import Conv1d, Conv2d, ConvTranspose1d from torch.nn import functional as F diff --git a/nemo/collections/tts/torch/data.py b/nemo/collections/tts/torch/data.py index 003b698b531d..ee12d4aeaaf8 100644 --- a/nemo/collections/tts/torch/data.py +++ b/nemo/collections/tts/torch/data.py @@ -164,7 +164,9 @@ def __init__( pitch_fmax (Optional[float]): The fmax input to librosa.pyin. Defaults to librosa.note_to_hz('C7'). pitch_mean (Optional[float]): The mean that we use to normalize the pitch. pitch_std (Optional[float]): The std that we use to normalize the pitch. - pitch_norm (Optional[bool]): Whether to normalize pitch (via pitch_mean and pitch_std) or not. + pitch_norm (Optional[bool]): Whether to normalize pitch or not. If True, requires providing either + pitch_stats_path or (pitch_mean and pitch_std). + pitch_stats_path (Optional[Path, str]): Path to file containing speaker level pitch statistics. """ super().__init__() @@ -418,6 +420,23 @@ def add_pitch(self, **kwargs): self.pitch_mean = kwargs.pop("pitch_mean", None) self.pitch_std = kwargs.pop("pitch_std", None) self.pitch_norm = kwargs.pop("pitch_norm", False) + pitch_stats_path = kwargs.pop("pitch_stats_path", None) + + if self.pitch_norm: + # XOR to validate that both or neither pitch mean and std are provided + assert (self.pitch_mean is None) == ( + self.pitch_std is None + ), f"Found only 1 of (pitch_mean, pitch_std): ({self.pitch_mean}, {self.pitch_std})" + + # XOR to validate that exactly 1 of (pitch_mean, pitch_std) or pitch_stats_path is provided. + assert (self.pitch_mean is None) != (pitch_stats_path is None), ( + f"pitch_norm requires exactly 1 of (pitch_mean, pitch_std) or pitch_stats_path. " + f"Provided: ({self.pitch_mean}, {self.pitch_std}) and {pitch_stats_path}" + ) + + if pitch_stats_path is not None: + with open(Path(pitch_stats_path), 'r', encoding="utf-8") as pitch_f: + self.pitch_stats = json.load(pitch_f) # saving voiced_mask and p_voiced with pitch def add_voiced_mask(self, **kwargs): @@ -556,12 +575,26 @@ def __getitem__(self, index): # normalize pitch if requested. 
if pitch is not None: - if self.pitch_mean is not None and self.pitch_std is not None and self.pitch_norm: - pitch -= self.pitch_mean - pitch[pitch == -self.pitch_mean] = 0.0 # Zero out values that were previously zero - pitch /= self.pitch_std - pitch_length = torch.tensor(len(pitch)).long() + if self.pitch_norm: + if self.pitch_mean is not None and self.pitch_std is not None: + sample_pitch_mean = self.pitch_mean + sample_pitch_std = self.pitch_std + elif self.pitch_stats: + if "speaker_id" in sample and str(sample["speaker_id"]) in self.pitch_stats: + pitch_stats = self.pitch_stats[str(sample["speaker_id"])] + elif "default" in self.pitch_stats: + pitch_stats = self.pitch_stats["default"] + else: + raise ValueError(f"Could not find pitch stats for {sample}.") + sample_pitch_mean = pitch_stats["pitch_mean"] + sample_pitch_std = pitch_stats["pitch_std"] + else: + raise ValueError(f"Missing statistics for pitch normalization.") + + pitch -= sample_pitch_mean + pitch[pitch == -sample_pitch_mean] = 0.0 # Zero out values that were previously zero + pitch /= sample_pitch_std # Load energy if needed energy, energy_length = None, None @@ -984,7 +1017,6 @@ def __getitem__(self, index): def __len__(self): return len(self.data) - class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler): """ Maintain similar input lengths in a batch. From c7fee0a002452595a8cc6893e6a215d1918c0b16 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 7 Dec 2022 19:57:32 +0000 Subject: [PATCH 210/244] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- nemo/collections/tts/torch/data.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nemo/collections/tts/torch/data.py b/nemo/collections/tts/torch/data.py index ee12d4aeaaf8..4237016891a5 100644 --- a/nemo/collections/tts/torch/data.py +++ b/nemo/collections/tts/torch/data.py @@ -1017,6 +1017,7 @@ def __getitem__(self, index): def __len__(self): return len(self.data) + class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler): """ Maintain similar input lengths in a batch. 
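The per-speaker normalization added above resolves statistics in priority order — explicit (pitch_mean, pitch_std), then a speaker entry in the stats file, then a "default" entry — and keeps unvoiced (zero) frames at exactly zero after shifting. A standalone sketch of that lookup-and-normalize step:

import torch

def normalize_pitch(pitch, speaker_id, pitch_stats, mean=None, std=None):
    if mean is None or std is None:
        stats = pitch_stats.get(str(speaker_id), pitch_stats.get("default"))
        if stats is None:
            raise ValueError(f"Could not find pitch stats for speaker {speaker_id}.")
        mean, std = stats["pitch_mean"], stats["pitch_std"]
    pitch = pitch - mean
    pitch[pitch == -mean] = 0.0   # frames that were 0 (unvoiced) stay exactly 0
    return pitch / std

stats = {"default": {"pitch_mean": 150.0, "pitch_std": 40.0}}
print(normalize_pitch(torch.tensor([0.0, 180.0, 120.0]), 7, stats))
# tensor([ 0.0000,  0.7500, -0.7500])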
From ed4b0b5bf3d943d7b2083eee0caa2732f86c3eec Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Wed, 7 Dec 2022 13:01:53 -0800 Subject: [PATCH 211/244] fixed filesize check --- nemo/collections/tts/torch/data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nemo/collections/tts/torch/data.py b/nemo/collections/tts/torch/data.py index ee12d4aeaaf8..8735167abac6 100644 --- a/nemo/collections/tts/torch/data.py +++ b/nemo/collections/tts/torch/data.py @@ -242,7 +242,7 @@ def __init__( file_info["text_tokens"] = self.text_tokenizer(file_info["normalized_text"]) data.append(file_info) - self.lengths.append(os.path.getsize(item["audio_filepath"]) // (2 * hop_length)) + self.lengths.append(os.path.getsize(item["audio_filepath"]) // (n_fft // 2)) if file_info["duration"] is None: logging.info( From 0414f422f25ee5fd5636e5857a6448b84e064dbd Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Sun, 11 Dec 2022 12:37:43 -0800 Subject: [PATCH 212/244] last cleaning Signed-off-by: Evgeniy Shabalin --- nemo/collections/tts/losses/vits_losses.py | 9 --------- nemo/collections/tts/models/vits.py | 4 ---- 2 files changed, 13 deletions(-) diff --git a/nemo/collections/tts/losses/vits_losses.py b/nemo/collections/tts/losses/vits_losses.py index 702873d1fa81..2cb54d33c119 100644 --- a/nemo/collections/tts/losses/vits_losses.py +++ b/nemo/collections/tts/losses/vits_losses.py @@ -38,7 +38,6 @@ # KlLoss import torch -from torch.autograd import Variable from nemo.core.classes import Loss, typecheck from nemo.core.neural_types.elements import LossType, VoidType @@ -133,8 +132,6 @@ def output_types(self): @typecheck() def forward(self, disc_real_outputs, disc_generated_outputs): - gen_loss = 0 - real_loss = 0 r_losses = [] g_losses = [] loss = 0 @@ -143,12 +140,6 @@ def forward(self, disc_real_outputs, disc_generated_outputs): dg = dg.float() r_loss = torch.mean((1 - dr) ** 2) g_loss = torch.mean(dg ** 2) - # real_loss += r_loss - # gen_loss += g_loss - # if i == 0: - # loss += torch.max(r_loss, g_loss) * 0.5 - # else: - # loss += torch.max(r_loss, g_loss) loss += r_loss + g_loss r_losses.append(r_loss.item()) g_losses.append(g_loss.item()) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index b6d50cd16756..20ab7c7db352 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -281,10 +281,6 @@ def validation_step(self, batch, batch_idx): # plot audio once per epoch if batch_idx == 0 and isinstance(self.logger, WandbLogger) and HAVE_WANDB: logger = self.logger.experiment - # tokens = self.parse('I speak loud and clear').cuda() - # audio = self.convert_text_to_waveform(tokens=tokens) - # audio_len = torch.tensor(audio.size(-1)).unsqueeze(0).cuda() - # spec, _ = self.audio_to_melspec_processor(audio, audio_len) specs = [] audios = [] From d667dfbe1654bad096ce53db0c6a1395fdeaaa99 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sun, 11 Dec 2022 21:01:55 +0000 Subject: [PATCH 213/244] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- requirements/requirements.txt | 2 +- tools/speech_data_processor/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements/requirements.txt b/requirements/requirements.txt index 6b0dc42aa1ea..2964747ec2b6 100644 --- a/requirements/requirements.txt +++ b/requirements/requirements.txt @@ -1,12 +1,12 @@ huggingface_hub numba numpy>=1.21 
-setuptools==59.5.0 onnx>=1.7.0 python-dateutil ruamel.yaml scikit-learn setuptools==59.5.0 +setuptools==59.5.0 tensorboard text-unidecode torch diff --git a/tools/speech_data_processor/requirements.txt b/tools/speech_data_processor/requirements.txt index e07336a0d3c3..63904d71d9c9 100644 --- a/tools/speech_data_processor/requirements.txt +++ b/tools/speech_data_processor/requirements.txt @@ -1 +1 @@ -diff_match_patch \ No newline at end of file +diff_match_patch From 7e987809dc982afbfd22a0e8a4cba12096999d10 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Sun, 11 Dec 2022 13:17:58 -0800 Subject: [PATCH 214/244] updated cmudict path --- examples/tts/conf/vits.yaml | 2 +- examples/tts/conf/vits_44100.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml index 276aa04d3f47..1002bdfe89f5 100644 --- a/examples/tts/conf/vits.yaml +++ b/examples/tts/conf/vits.yaml @@ -11,7 +11,7 @@ validation_datasets: ??? sup_data_path: null sup_data_types: null -phoneme_dict_path: "scripts/tts_dataset_files/ipa_cmudict-0.7b_nv22.08.txt" +phoneme_dict_path: "scripts/tts_dataset_files/ipa_cmudict-0.7b_nv22.10.txt" heteronyms_path: "scripts/tts_dataset_files/heteronyms-052722" whitelist_path: "nemo_text_processing/text_normalization/en/data/whitelist/lj_speech.tsv" diff --git a/examples/tts/conf/vits_44100.yaml b/examples/tts/conf/vits_44100.yaml index 827696c9c743..5eb3c98cff9a 100644 --- a/examples/tts/conf/vits_44100.yaml +++ b/examples/tts/conf/vits_44100.yaml @@ -23,7 +23,7 @@ lowfreq: 0 highfreq: null window: hann -phoneme_dict_path: "scripts/tts_dataset_files/ipa_cmudict-0.7b_nv22.08.txt" +phoneme_dict_path: "scripts/tts_dataset_files/ipa_cmudict-0.7b_nv22.10.txt" heteronyms_path: "scripts/tts_dataset_files/heteronyms-052722" whitelist_path: "nemo_text_processing/text_normalization/en/data/whitelist/lj_speech.tsv" From 67ba20a130668cd15b186aa90f1c57e87d9f7de4 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Sun, 11 Dec 2022 13:25:58 -0800 Subject: [PATCH 215/244] fixed merge bug Signed-off-by: Evgeniy Shabalin --- tests/collections/tts/test_tts_exportables.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/tests/collections/tts/test_tts_exportables.py b/tests/collections/tts/test_tts_exportables.py index dca0bdb14b3e..c7083b45f6d7 100644 --- a/tests/collections/tts/test_tts_exportables.py +++ b/tests/collections/tts/test_tts_exportables.py @@ -73,19 +73,11 @@ def test_HifiGanModel_export_to_onnx(self, hifigan_model): filename = os.path.join(tmpdir, 'hfg.pt') model.export(output=filename, verbose=True, check_trace=True) -<<<<<<< HEAD -======= @pytest.mark.pleasefixme ->>>>>>> origin/main @pytest.mark.run_only_on('GPU') @pytest.mark.unit def test_RadTTSModel_export_to_torchscript(self, radtts_model): model = radtts_model.cuda() with tempfile.TemporaryDirectory() as tmpdir: filename = os.path.join(tmpdir, 'rad.ts') -<<<<<<< HEAD - with torch.cuda.amp.autocast(enabled=True): - model.export(output=filename, verbose=True, check_trace=True) -======= model.export(output=filename, verbose=True, check_trace=True) ->>>>>>> origin/main From c65b95649819f35e2e21039d9d1b695dfa367b36 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Mon, 12 Dec 2022 09:45:10 -0800 Subject: [PATCH 216/244] warnings fix --- nemo/collections/tts/models/vits.py | 2 ++ nemo/collections/tts/modules/vits_modules.py | 24 +++++++++----------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/nemo/collections/tts/models/vits.py 
b/nemo/collections/tts/models/vits.py
index 20ab7c7db352..906524c29c13 100644
--- a/nemo/collections/tts/models/vits.py
+++ b/nemo/collections/tts/models/vits.py
@@ -152,6 +152,8 @@ def configure_optimizers(self):
                 scheduler_d = CosineAnnealing(
                     optimizer=optim_d, max_steps=sched_config.max_steps, min_lr=sched_config.min_lr,
                 )
+            else:
+                raise ValueError("Unknown scheduler.")
 
             scheduler_g_dict = {'scheduler': scheduler_g, 'interval': 'step'}
             scheduler_d_dict = {'scheduler': scheduler_d, 'interval': 'step'}
diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py
index 566c6d0db059..7181f7e64ad0 100644
--- a/nemo/collections/tts/modules/vits_modules.py
+++ b/nemo/collections/tts/modules/vits_modules.py
@@ -37,7 +37,7 @@
 import math
 
 import torch
-from torch import nn
+import torch.nn as nn
 from torch.nn import Conv1d, Conv2d, ConvTranspose1d
 from torch.nn import functional as F
 from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm
@@ -350,18 +350,16 @@ def forward(self, x, x_mask, g=None, reverse=False):
         unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(self.filter_channels)
         unnormalized_derivatives = h[..., 2 * self.num_bins :]
 
-        if x1.size(0) != 0:
-            x1, logabsdet = piecewise_rational_quadratic_transform(
-                x1,
-                unnormalized_widths,
-                unnormalized_heights,
-                unnormalized_derivatives,
-                inverse=reverse,
-                tails='linear',
-                tail_bound=self.tail_bound,
-            )
-        else:
-            logdet = 0
+        x1, logabsdet = piecewise_rational_quadratic_transform(
+            x1,
+            unnormalized_widths,
+            unnormalized_heights,
+            unnormalized_derivatives,
+            inverse=reverse,
+            tails='linear',
+            tail_bound=self.tail_bound,
+        )
+
         x = torch.cat([x0, x1], 1) * x_mask
         logdet = torch.sum(logabsdet * x_mask, [1, 2])
         if not reverse:

From fe63ee59fcf5371fb8b72af0cc6f9ab19ec21743 Mon Sep 17 00:00:00 2001
From: Evgeniy Shabalin
Date: Mon, 12 Dec 2022 12:15:49 -0800
Subject: [PATCH 217/244] fix warnings

Signed-off-by: Evgeniy Shabalin
---
 nemo/collections/tts/modules/vits_modules.py | 33 ++++++++++----------
 1 file changed, 16 insertions(+), 17 deletions(-)

diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py
index 7181f7e64ad0..99ef7c64c1f6 100644
--- a/nemo/collections/tts/modules/vits_modules.py
+++ b/nemo/collections/tts/modules/vits_modules.py
@@ -38,7 +38,6 @@
 
 import torch
 import torch.nn as nn
-from torch.nn import Conv1d, Conv2d, ConvTranspose1d
 from torch.nn import functional as F
 from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm
@@ -607,14 +606,14 @@ def __init__(
         super(Generator, self).__init__()
         self.num_kernels = len(resblock_kernel_sizes)
         self.num_upsamples = len(upsample_rates)
-        self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
+        self.conv_pre = nn.Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3)
 
         resblock = ResBlock1 if resblock == '1' else ResBlock2
         self.ups = nn.ModuleList()
         for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
             self.ups.append(
                 weight_norm(
-                    ConvTranspose1d(
+                    nn.ConvTranspose1d(
                         upsample_initial_channel // (2 ** i),
                         upsample_initial_channel // (2 ** (i + 1)),
                         k,
@@ -630,7 +629,7 @@ def __init__(
         for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
             self.resblocks.append(resblock(ch, k, d))
 
-        self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False)
+        self.conv_post = nn.Conv1d(ch, 1, 7, 1, padding=3, bias=False)
         self.ups.apply(init_weights)
 
         if
gin_channels != 0: @@ -670,15 +669,15 @@ def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): norm_f = weight_norm if use_spectral_norm == False else spectral_norm self.convs = nn.ModuleList( [ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), + norm_f(nn.Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(nn.Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(nn.Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(nn.Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(nn.Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), ] ) self.dropout = nn.Dropout(0.3) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) + self.conv_post = norm_f(nn.Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) def forward(self, x): fmap = [] @@ -709,16 +708,16 @@ def __init__(self, use_spectral_norm=False): norm_f = weight_norm if use_spectral_norm == False else spectral_norm self.convs = nn.ModuleList( [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), + norm_f(nn.Conv1d(1, 16, 15, 1, padding=7)), + norm_f(nn.Conv1d(16, 64, 41, 4, groups=4, padding=20)), + norm_f(nn.Conv1d(64, 256, 41, 4, groups=16, padding=20)), + norm_f(nn.Conv1d(256, 1024, 41, 4, groups=64, padding=20)), + norm_f(nn.Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), + norm_f(nn.Conv1d(1024, 1024, 5, 1, padding=2)), ] ) self.dropout = nn.Dropout(0.3) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) + self.conv_post = norm_f(nn.Conv1d(1024, 1, 3, 1, padding=1)) def forward(self, x): fmap = [] From 591d74a8102a7186ac3968013a5f3fdacfdbecc9 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Tue, 13 Dec 2022 03:13:38 -0800 Subject: [PATCH 218/244] storing --- examples/tts/conf/vits.yaml | 215 + examples/tts/conf/vits_44100.yaml | 213 + examples/tts/vits.py | 34 + nemo/collections/tts/losses/vits_losses.py | 176 + nemo/collections/tts/models/vits.py | 365 + .../tts/modules/monotonic_align/__init__.py | 103 + .../tts/modules/monotonic_align/core.c | 21336 ++++++++++++++++ .../tts/modules/monotonic_align/core.pyx | 42 + .../tts/modules/monotonic_align/numba_core.py | 67 + .../tts/modules/monotonic_align/setup.py | 45 + nemo/collections/tts/modules/vits_modules.py | 1294 + tutorials/nlp/Question_Answering_Squad.ipynb | 725 + 12 files changed, 24615 insertions(+) create mode 100644 examples/tts/conf/vits.yaml create mode 100644 examples/tts/conf/vits_44100.yaml create mode 100644 examples/tts/vits.py create mode 100644 nemo/collections/tts/losses/vits_losses.py create mode 100644 nemo/collections/tts/models/vits.py create mode 100644 
nemo/collections/tts/modules/monotonic_align/__init__.py
 create mode 100644 nemo/collections/tts/modules/monotonic_align/core.c
 create mode 100644 nemo/collections/tts/modules/monotonic_align/core.pyx
 create mode 100644 nemo/collections/tts/modules/monotonic_align/numba_core.py
 create mode 100644 nemo/collections/tts/modules/monotonic_align/setup.py
 create mode 100644 nemo/collections/tts/modules/vits_modules.py
 create mode 100755 tutorials/nlp/Question_Answering_Squad.ipynb

diff --git a/examples/tts/conf/vits.yaml b/examples/tts/conf/vits.yaml
new file mode 100644
index 000000000000..1002bdfe89f5
--- /dev/null
+++ b/examples/tts/conf/vits.yaml
@@ -0,0 +1,215 @@
+# This config contains the default values for training a VITS model on the LJSpeech dataset.
+# If you want to train the model on another dataset, adjust these values to match your data.
+# Most dataset-specific arguments are in the head of the config file, see below.
+
+# TODO: remove unnecessary arguments, refactor
+
+name: VITS
+
+train_dataset: ???
+validation_datasets: ???
+sup_data_path: null
+sup_data_types: null
+
+phoneme_dict_path: "scripts/tts_dataset_files/ipa_cmudict-0.7b_nv22.10.txt"
+heteronyms_path: "scripts/tts_dataset_files/heteronyms-052722"
+whitelist_path: "nemo_text_processing/text_normalization/en/data/whitelist/lj_speech.tsv"
+
+# Default values from librosa.pyin
+pitch_fmin: 65.40639132514966
+pitch_fmax: 2093.004522404789
+
+sample_rate: 22050
+n_mel_channels: 80
+n_window_size: 1024
+n_window_stride: 256
+n_fft: 1024
+lowfreq: 0
+highfreq: null
+window: hann
+
+model:
+  pitch_fmin: ${pitch_fmin}
+  pitch_fmax: ${pitch_fmax}
+
+  sample_rate: ${sample_rate}
+  n_mel_channels: ${n_mel_channels}
+  n_window_size: ${n_window_size}
+  n_window_stride: ${n_window_stride}
+  n_fft: ${n_fft}
+  lowfreq: ${lowfreq}
+  highfreq: ${highfreq}
+  window: ${window}
+  mel_fmin: 0.0
+  mel_fmax: null
+
+  n_speakers: 0
+  segment_size: 8192
+  c_mel: 45
+  c_kl: 1.
+ use_spectral_norm: false + + text_normalizer: + _target_: nemo_text_processing.text_normalization.normalize.Normalizer + lang: en + input_case: cased + whitelist: ${whitelist_path} + + text_normalizer_call_kwargs: + verbose: false + punct_pre_process: true + punct_post_process: true + + text_tokenizer: + _target_: nemo.collections.common.tokenizers.text_to_speech.tts_tokenizers.IPATokenizer + punct: true + apostrophe: true + pad_with_space: false + g2p: + _target_: nemo_text_processing.g2p.modules.IPAG2P + phoneme_dict: ${phoneme_dict_path} + heteronyms: ${heteronyms_path} + phoneme_probability: 0.8 + # Relies on the heteronyms list for anything that needs to be disambiguated + ignore_ambiguous_words: false + use_chars: true + use_stresses: true + + train_ds: + dataset: + _target_: "nemo.collections.tts.torch.data.TTSDataset" + manifest_filepath: ${train_dataset} + sample_rate: ${model.sample_rate} + sup_data_path: ${sup_data_path} + sup_data_types: ${sup_data_types} + n_fft: ${model.n_fft} + win_length: ${model.n_window_size} + hop_length: ${model.n_window_stride} + window: ${model.window} + n_mels: ${model.n_mel_channels} + lowfreq: ${model.lowfreq} + highfreq: ${model.highfreq} + max_duration: null + min_duration: 0.1 + ignore_file: null + trim: False + pitch_fmin: ${model.pitch_fmin} + pitch_fmax: ${model.pitch_fmax} + + dataloader_params: + num_workers: 8 + pin_memory: false + + batch_sampler: + batch_size: 32 + boundaries: [32,300,400,500,600,700,800,900,1000] + num_replicas: ${trainer.devices} + shuffle: true + + validation_ds: + dataset: + _target_: "nemo.collections.tts.torch.data.TTSDataset" + manifest_filepath: ${validation_datasets} + sample_rate: ${model.sample_rate} + sup_data_path: ${sup_data_path} + sup_data_types: ${sup_data_types} + n_fft: ${model.n_fft} + win_length: ${model.n_window_size} + hop_length: ${model.n_window_stride} + window: ${model.window} + n_mels: ${model.n_mel_channels} + lowfreq: ${model.lowfreq} + highfreq: ${model.highfreq} + max_duration: null + min_duration: 0.1 + ignore_file: null + trim: False + pitch_fmin: ${model.pitch_fmin} + pitch_fmax: ${model.pitch_fmax} + + dataloader_params: + drop_last: false + shuffle: false + batch_size: 16 + num_workers: 4 + pin_memory: false + + preprocessor: + _target_: nemo.collections.asr.parts.preprocessing.features.FilterbankFeatures + nfilt: ${model.n_mel_channels} + highfreq: ${model.highfreq} + log: true + log_zero_guard_type: clamp + log_zero_guard_value: 1e-05 + lowfreq: ${model.lowfreq} + n_fft: ${model.n_fft} + n_window_size: ${model.n_window_size} + n_window_stride: ${model.n_window_stride} + pad_to: 1 + pad_value: 0 + sample_rate: ${model.sample_rate} + window: ${model.window} + normalize: null + preemph: null + dither: 0.0 + frame_splicing: 1 + stft_conv: false + nb_augmentation_prob : 0 + mag_power: 1.0 + exact_pad: true + use_grads: true + + synthesizer: + _target_: nemo.collections.tts.modules.vits_modules.SynthesizerTrn + inter_channels: 192 + hidden_channels: 192 + filter_channels: 768 + n_heads: 2 + n_layers: 6 + kernel_size: 3 + p_dropout: 0.1 + resblock: "1" + resblock_kernel_sizes: [3,7,11] + resblock_dilation_sizes: [[1,3,5], [1,3,5], [1,3,5]] + upsample_rates: [8,8,2,2] + upsample_initial_channel: 512 + upsample_kernel_sizes: [16,16,4,4] + n_speakers: ${model.n_speakers} + gin_channels: 256 # for multi-speaker + + optim: + _target_: torch.optim.AdamW + lr: 2e-4 + betas: [0.9, 0.99] + eps: 1e-9 + + sched: + name: ExponentialLR + lr_decay: 0.999875 + +trainer: + num_nodes: 1 + devices: 2 + 
  accelerator: gpu
+  strategy: ddp
+  precision: 32
+  # amp_backend: 'apex'
+  # amp_level: 'O2'
+  # benchmark: true
+  max_epochs: -1
+  accumulate_grad_batches: 1
+  enable_checkpointing: false # Provided by exp_manager
+  logger: false # Provided by exp_manager
+  log_every_n_steps: 50
+  check_val_every_n_epoch: 1
+
+exp_manager:
+  exp_dir: ???
+  name: ${name}
+  create_tensorboard_logger: true
+  create_checkpoint_callback: true
+  checkpoint_callback_params:
+    monitor: loss_gen_all
+    mode: min
+  resume_if_exists: false
+  resume_ignore_no_checkpoint: false
diff --git a/examples/tts/conf/vits_44100.yaml b/examples/tts/conf/vits_44100.yaml
new file mode 100644
index 000000000000..5eb3c98cff9a
--- /dev/null
+++ b/examples/tts/conf/vits_44100.yaml
@@ -0,0 +1,213 @@
+# This config contains the default values for training a 44100 Hz multi-speaker VITS model.
+# If you want to train the model on another dataset, adjust these values to match your data.
+# Most dataset-specific arguments are in the head of the config file, see below.
+
+# TODO: remove unnecessary arguments, refactor
+
+name: VITS
+
+train_dataset: ???
+validation_datasets: ???
+sup_data_path: ???
+sup_data_types: [speaker_id]
+
+pitch_fmin: 65.40639132514966
+pitch_fmax: 2093.004522404789
+
+sample_rate: 44100
+n_mel_channels: 80
+n_window_size: 2048
+n_window_stride: 512
+n_fft: 2048
+lowfreq: 0
+highfreq: null
+window: hann
+
+phoneme_dict_path: "scripts/tts_dataset_files/ipa_cmudict-0.7b_nv22.10.txt"
+heteronyms_path: "scripts/tts_dataset_files/heteronyms-052722"
+whitelist_path: "nemo_text_processing/text_normalization/en/data/whitelist/lj_speech.tsv"
+
+model:
+  n_speakers: 13000
+  segment_size: 16384
+  c_mel: 45
+  c_kl: 1.
+  use_spectral_norm: false
+
+  pitch_fmin: ${pitch_fmin}
+  pitch_fmax: ${pitch_fmax}
+
+  sample_rate: ${sample_rate}
+  n_mel_channels: ${n_mel_channels}
+  n_window_size: ${n_window_size}
+  n_window_stride: ${n_window_stride}
+  n_fft: ${n_fft}
+  lowfreq: ${lowfreq}
+  highfreq: ${highfreq}
+  window: ${window}
+
+  text_normalizer:
+    _target_: nemo_text_processing.text_normalization.normalize.Normalizer
+    lang: en
+    input_case: cased
+    whitelist: ${whitelist_path}
+
+  text_normalizer_call_kwargs:
+    verbose: false
+    punct_pre_process: true
+    punct_post_process: true
+
+  text_tokenizer:
+    _target_: nemo.collections.common.tokenizers.text_to_speech.tts_tokenizers.IPATokenizer
+    punct: true
+    apostrophe: true
+    pad_with_space: false
+    g2p:
+      _target_: nemo_text_processing.g2p.modules.IPAG2P
+      phoneme_dict: ${phoneme_dict_path}
+      heteronyms: ${heteronyms_path}
+      phoneme_probability: 0.8
+      # Relies on the heteronyms list for anything that needs to be disambiguated
+      ignore_ambiguous_words: false
+      use_chars: true
+      use_stresses: true
+
+  train_ds:
+    dataset:
+      _target_: "nemo.collections.tts.torch.data.TTSDataset"
+      manifest_filepath: ${train_dataset}
+      sample_rate: ${model.sample_rate}
+      sup_data_path: ${sup_data_path}
+      sup_data_types: ${sup_data_types}
+      n_fft: ${model.n_fft}
+      win_length: ${model.n_window_size}
+      hop_length: ${model.n_window_stride}
+      window: ${model.window}
+      n_mels: ${model.n_mel_channels}
+      lowfreq: ${model.lowfreq}
+      highfreq: ${model.highfreq}
+      max_duration: null
+      min_duration: 0.1
+      ignore_file: null
+      trim: False
+      pitch_fmin: ${model.pitch_fmin}
+      pitch_fmax: ${model.pitch_fmax}
+
+    dataloader_params:
+      num_workers: 8
+      pin_memory: false
+
+    batch_sampler:
+      batch_size: 2
+      boundaries: [32,300,400,500,600,700,800,900,1000]
+      num_replicas: ${trainer.devices}
+      shuffle: true
+
+  
validation_ds: + dataset: + _target_: "nemo.collections.tts.torch.data.TTSDataset" + manifest_filepath: ${validation_datasets} + sample_rate: ${model.sample_rate} + sup_data_path: ${sup_data_path} + sup_data_types: ${sup_data_types} + n_fft: ${model.n_fft} + win_length: ${model.n_window_size} + hop_length: ${model.n_window_stride} + window: ${model.window} + n_mels: ${model.n_mel_channels} + lowfreq: ${model.lowfreq} + highfreq: ${model.highfreq} + max_duration: null + min_duration: 0.1 + ignore_file: null + trim: False + pitch_fmin: ${model.pitch_fmin} + pitch_fmax: ${model.pitch_fmax} + + dataloader_params: + drop_last: false + shuffle: false + batch_size: 2 + num_workers: 4 + pin_memory: false + + preprocessor: + _target_: nemo.collections.asr.parts.preprocessing.features.FilterbankFeatures + nfilt: ${model.n_mel_channels} + highfreq: ${model.highfreq} + log: true + log_zero_guard_type: clamp + log_zero_guard_value: 1e-05 + lowfreq: ${model.lowfreq} + n_fft: ${model.n_fft} + n_window_size: ${model.n_window_size} + n_window_stride: ${model.n_window_stride} + pad_to: 1 + pad_value: 0 + sample_rate: ${model.sample_rate} + window: ${model.window} + normalize: null + preemph: null + dither: 0.0 + frame_splicing: 1 + stft_conv: false + nb_augmentation_prob : 0 + mag_power: 1.0 + exact_pad: true + use_grads: true + + synthesizer: + _target_: nemo.collections.tts.modules.vits_modules.SynthesizerTrn + inter_channels: 192 + hidden_channels: 192 + filter_channels: 768 + n_heads: 2 + n_layers: 6 + kernel_size: 3 + p_dropout: 0.1 + resblock: "1" + resblock_kernel_sizes: [3,7,11] + resblock_dilation_sizes: [[1,3,5], [1,3,5], [1,3,5]] + upsample_rates: [8,8,4,2] + upsample_initial_channel: 512 + upsample_kernel_sizes: [16,16,4,4] + n_speakers: ${model.n_speakers} + gin_channels: 256 # for multi-speaker + + optim: + _target_: torch.optim.AdamW + lr: 2e-4 + betas: [0.9, 0.99] + eps: 1e-9 + + sched: + name: CosineAnnealing + max_steps: 1000000 + min_lr: 1e-5 + +trainer: + num_nodes: 1 + devices: 2 + accelerator: gpu + strategy: ddp + precision: 32 + # amp_backend: 'apex' + # amp_level: 'O2' + # benchmark: true + max_epochs: -1 + accumulate_grad_batches: 1 + enable_checkpointing: false # Provided by exp_manager + logger: false # Provided by exp_manager + log_every_n_steps: 50 + check_val_every_n_epoch: 1 + +exp_manager: + exp_dir: ??? + name: ${name} + create_tensorboard_logger: true + create_checkpoint_callback: true + checkpoint_callback_params: + monitor: loss_gen_all + mode: min + resume_if_exists: false + resume_ignore_no_checkpoint: false diff --git a/examples/tts/vits.py b/examples/tts/vits.py new file mode 100644 index 000000000000..ac966900ba47 --- /dev/null +++ b/examples/tts/vits.py @@ -0,0 +1,34 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
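+
+# Example launch (a sketch: the three mandatory ??? fields from conf/vits.yaml
+# must be overridden, and the paths below are placeholders for your own data):
+#
+#   python examples/tts/vits.py \
+#     train_dataset=/path/to/train_manifest.json \
+#     validation_datasets=/path/to/val_manifest.json \
+#     exp_manager.exp_dir=/path/to/results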
+ +import pytorch_lightning as pl + +from nemo.collections.common.callbacks import LogEpochTimeCallback +from nemo.collections.tts.models.vits import VitsModel +from nemo.core.config import hydra_runner +from nemo.utils.exp_manager import exp_manager + + +@hydra_runner(config_path="conf", config_name="vits") +def main(cfg): + trainer = pl.Trainer(replace_sampler_ddp=False, **cfg.trainer) + exp_manager(trainer, cfg.get("exp_manager", None)) + model = VitsModel(cfg=cfg.model, trainer=trainer) + + trainer.callbacks.extend([pl.callbacks.LearningRateMonitor(), LogEpochTimeCallback()]) + trainer.fit(model) + + +if __name__ == '__main__': + main() # noqa pylint: disable=no-value-for-parameter diff --git a/nemo/collections/tts/losses/vits_losses.py b/nemo/collections/tts/losses/vits_losses.py new file mode 100644 index 000000000000..2cb54d33c119 --- /dev/null +++ b/nemo/collections/tts/losses/vits_losses.py @@ -0,0 +1,176 @@ +# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# MIT License +# +# Copyright (c) 2021 Jaehyeon Kim +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
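+
+# Note on the adversarial terms below: they follow the least-squares GAN
+# formulation used by VITS and HiFi-GAN. The discriminator is pushed towards
+# D(real) -> 1 and D(fake) -> 0 via mean((1 - D(real))^2) + mean(D(fake)^2),
+# and the generator is pushed towards D(fake) -> 1 via mean((1 - D(fake))^2).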
+
+# The forward functions of the following classes are based on code from https://github.com/jaywalnut310/vits:
+# KlLoss
+
+import torch
+
+from nemo.core.classes import Loss, typecheck
+from nemo.core.neural_types.elements import LossType, VoidType
+from nemo.core.neural_types.neural_type import NeuralType
+
+
+class KlLoss(Loss):
+    @property
+    def input_types(self):
+        return {
+            "z_p": [NeuralType(('B', 'D', 'T'), VoidType())],
+            "logs_q": [NeuralType(('B', 'D', 'T'), VoidType())],
+            "m_p": [NeuralType(('B', 'D', 'T'), VoidType())],
+            "logs_p": [NeuralType(('B', 'D', 'T'), VoidType())],
+            "z_mask": [NeuralType(('B', 'D', 'T'), VoidType())],
+        }
+
+    @property
+    def output_types(self):
+        return {
+            "loss": NeuralType(elements_type=LossType()),
+        }
+
+    @typecheck()
+    def forward(self, z_p, logs_q, m_p, logs_p, z_mask):
+        """
+        z_p, logs_q: flow-mapped posterior sample and posterior log-std, [b, h, t_t]
+        m_p, logs_p: prior mean and prior log-std, [b, h, t_t]
+        """
+        z_p = z_p.float()
+        logs_q = logs_q.float()
+        m_p = m_p.float()
+        logs_p = logs_p.float()
+        z_mask = z_mask.float()
+
+        kl = logs_p - logs_q - 0.5
+        kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)
+        kl = torch.sum(kl * z_mask)
+        l = kl / torch.sum(z_mask)
+        return l
+
+
+class FeatureMatchingLoss(Loss):
+    """VITS Feature Matching Loss module"""
+
+    @property
+    def input_types(self):
+        return {
+            "fmap_r": [[NeuralType(elements_type=VoidType())]],
+            "fmap_g": [[NeuralType(elements_type=VoidType())]],
+        }
+
+    @property
+    def output_types(self):
+        return {
+            "loss": NeuralType(elements_type=LossType()),
+        }
+
+    @typecheck()
+    def forward(self, fmap_r, fmap_g):
+        """
+        fmap_r, fmap_g: List[List[Tensor]]
+        """
+        loss = 0
+        for dr, dg in zip(fmap_r, fmap_g):
+            for rl, gl in zip(dr, dg):
+                rl = rl.float().detach()
+                gl = gl.float()
+                loss += torch.mean(torch.abs(rl - gl))
+
+        return loss * 2
+
+
+class DiscriminatorLoss(Loss):
+    """Discriminator Loss module"""
+
+    @property
+    def input_types(self):
+        return {
+            "disc_real_outputs": [NeuralType(('B', 'T'), VoidType())],
+            "disc_generated_outputs": [NeuralType(('B', 'T'), VoidType())],
+        }
+
+    @property
+    def output_types(self):
+        return {
+            "loss": NeuralType(elements_type=LossType()),
+            # "gen_loss": NeuralType(elements_type=LossType()),
+            "real_losses": [NeuralType(elements_type=LossType())],
+            "fake_losses": [NeuralType(elements_type=LossType())],
+        }
+
+    @typecheck()
+    def forward(self, disc_real_outputs, disc_generated_outputs):
+        r_losses = []
+        g_losses = []
+        loss = 0
+        for i, (dr, dg) in enumerate(zip(disc_real_outputs, disc_generated_outputs)):
+            dr = dr.float()
+            dg = dg.float()
+            r_loss = torch.mean((1 - dr) ** 2)
+            g_loss = torch.mean(dg ** 2)
+            loss += r_loss + g_loss
+            r_losses.append(r_loss.item())
+            g_losses.append(g_loss.item())
+
+        return loss, r_losses, g_losses
+
+
+class GeneratorLoss(Loss):
+    """Generator Loss module"""
+
+    @property
+    def input_types(self):
+        return {
+            "disc_outputs": [NeuralType(('B', 'T'), VoidType())],
+        }
+
+    @property
+    def output_types(self):
+        return {
+            "loss": NeuralType(elements_type=LossType()),
+            "fake_losses": [NeuralType(elements_type=LossType())],
+        }
+
+    @typecheck()
+    def forward(self, disc_outputs):
+        loss = 0
+        gen_losses = []
+        for dg in disc_outputs:
+            dg = dg.float()
+            l = torch.mean((1 - dg) ** 2)
+            gen_losses.append(l)
+            loss += l
+
+        return loss, gen_losses
diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py
new file mode 100644
index 000000000000..906524c29c13
--- /dev/null
+++ b/nemo/collections/tts/models/vits.py
@@ -0,0 +1,364 @@
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import contextlib
+
+import omegaconf
+import torch
+from hydra.utils import instantiate
+from omegaconf import DictConfig, OmegaConf
+from pytorch_lightning import Trainer
+from pytorch_lightning.loggers import WandbLogger
+from torch.cuda.amp import autocast
+from torch.nn import functional as F
+
+from nemo.collections.tts.helpers.helpers import clip_grad_value_, plot_spectrogram_to_numpy, slice_segments
+from nemo.collections.tts.losses.vits_losses import DiscriminatorLoss, FeatureMatchingLoss, GeneratorLoss, KlLoss
+from nemo.collections.tts.models.base import TextToWaveform
+from nemo.collections.tts.modules.vits_modules import MultiPeriodDiscriminator
+from nemo.collections.tts.torch.data import DistributedBucketSampler
+from nemo.collections.tts.torch.tts_data_types import SpeakerID
+from nemo.core.classes.common import PretrainedModelInfo
+from nemo.core.optim.lr_scheduler import CosineAnnealing
+from nemo.utils import logging, model_utils
+
+HAVE_WANDB = True
+try:
+    import wandb
+except ModuleNotFoundError:
+    HAVE_WANDB = False
+
+
+class VitsModel(TextToWaveform):
+    def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None):
+        # Convert to Hydra 1.0 compatible DictConfig
+
+        cfg = model_utils.convert_model_config_to_dict_config(cfg)
+        cfg = model_utils.maybe_update_config_version(cfg)
+
+        # setup normalizer
+        self.normalizer = None
+        self.text_normalizer_call = None
+        self.text_normalizer_call_kwargs = {}
+        self._setup_normalizer(cfg)
+
+        # setup tokenizer
+        self.tokenizer = None
+        self._setup_tokenizer(cfg)
+        assert self.tokenizer is not None
+
+        num_tokens = len(self.tokenizer.tokens)
+        self.tokenizer_pad = self.tokenizer.pad
+
+        super().__init__(cfg=cfg, trainer=trainer)
+
+        self.audio_to_melspec_processor = instantiate(cfg.preprocessor, highfreq=cfg.train_ds.dataset.highfreq)
+
+        self.feat_matching_loss = FeatureMatchingLoss()
+        self.disc_loss = DiscriminatorLoss()
+        self.gen_loss = GeneratorLoss()
+        self.kl_loss = KlLoss()
+
+        self.net_g = instantiate(
+            cfg.synthesizer,
+            n_vocab=num_tokens,
+            spec_channels=cfg.n_fft // 2 + 1,
+            segment_size=cfg.segment_size // cfg.n_window_stride,
+            padding_idx=self.tokenizer_pad,
+        )
+
+        self.net_d = MultiPeriodDiscriminator(cfg.use_spectral_norm)
+
+        self.automatic_optimization = False
+
+    def _setup_normalizer(self, cfg):
+        if "text_normalizer" in cfg:
+            normalizer_kwargs = {}
+
+            if "whitelist" in cfg.text_normalizer:
+                normalizer_kwargs["whitelist"] = self.register_artifact(
+                    'text_normalizer.whitelist', cfg.text_normalizer.whitelist
+                )
+
+            self.normalizer = instantiate(cfg.text_normalizer, **normalizer_kwargs)
+            self.text_normalizer_call = self.normalizer.normalize
+            if "text_normalizer_call_kwargs" in cfg:
+                self.text_normalizer_call_kwargs = cfg.text_normalizer_call_kwargs
+
+    def _setup_tokenizer(self, cfg):
+        text_tokenizer_kwargs = {}
+        if "g2p" in cfg.text_tokenizer and cfg.text_tokenizer.g2p is not None:
+            g2p_kwargs = {}
+
+            if "phoneme_dict" in cfg.text_tokenizer.g2p:
+                g2p_kwargs["phoneme_dict"] = self.register_artifact(
+                    'text_tokenizer.g2p.phoneme_dict', cfg.text_tokenizer.g2p.phoneme_dict,
+                )
+
+            if "heteronyms" in cfg.text_tokenizer.g2p:
+                g2p_kwargs["heteronyms"] = self.register_artifact(
+                    'text_tokenizer.g2p.heteronyms', cfg.text_tokenizer.g2p.heteronyms,
+                )
+
+            text_tokenizer_kwargs["g2p"] = instantiate(cfg.text_tokenizer.g2p, **g2p_kwargs)
+
+        self.tokenizer = instantiate(cfg.text_tokenizer, **text_tokenizer_kwargs)
+
+    def parse(self, text: str, normalize=True) -> torch.Tensor:
+        if self.training:
+            logging.warning("parse() is meant to be called in eval mode.")
+        if normalize and self.text_normalizer_call is not None:
+            text = self.text_normalizer_call(text, **self.text_normalizer_call_kwargs)
+
+        eval_phon_mode = contextlib.nullcontext()
+        if hasattr(self.tokenizer, "set_phone_prob"):
+            eval_phon_mode = self.tokenizer.set_phone_prob(prob=1.0)
+
+        with eval_phon_mode:
+            tokens = self.tokenizer.encode(text)
+
+        return torch.tensor(tokens).long().unsqueeze(0).to(self.device)
+
+    def configure_optimizers(self):
+        optim_config = self._cfg.optim.copy()
+        OmegaConf.set_struct(optim_config, False)
+        sched_config = optim_config.pop("sched", None)
+        OmegaConf.set_struct(optim_config, True)
+
+        optim_g = instantiate(optim_config, params=self.net_g.parameters(),)
+        optim_d = instantiate(optim_config, params=self.net_d.parameters(),)
+
+        if sched_config is not None:
+            if sched_config.name == 'ExponentialLR':
+                scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=sched_config.lr_decay)
+                scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=sched_config.lr_decay)
+            elif sched_config.name == 'CosineAnnealing':
+                scheduler_g = CosineAnnealing(
+                    optimizer=optim_g, max_steps=sched_config.max_steps, min_lr=sched_config.min_lr,
+                )
+                scheduler_d = CosineAnnealing(
+                    optimizer=optim_d, max_steps=sched_config.max_steps, min_lr=sched_config.min_lr,
+                )
+            else:
+                raise ValueError("Unknown scheduler.")
+
+            scheduler_g_dict = {'scheduler': scheduler_g, 'interval': 'step'}
+            scheduler_d_dict = {'scheduler': scheduler_d, 'interval': 'step'}
+            return [optim_g, optim_d], [scheduler_g_dict, scheduler_d_dict]
+        else:
+            return [optim_g, optim_d]
+
+    # for inference
+    def forward(self, tokens, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1.0, max_len=1000):
+        text_len = torch.tensor([tokens.size(-1)]).to(int).to(tokens.device)
+        audio_pred, attn, y_mask, (z, z_p, m_p, logs_p) = self.net_g.infer(
+            tokens,
+            text_len,
+            sid=sid,
+            noise_scale=noise_scale,
+            length_scale=length_scale,
+            noise_scale_w=noise_scale_w,
+            max_len=max_len,
+        )
+        return audio_pred, attn, y_mask, (z, z_p, m_p, logs_p)
+
+    def training_step(self, batch, batch_idx):
+        speakers = None
+        if SpeakerID in self._train_dl.dataset.sup_data_types_set:
+            (audio, audio_len, text, text_len, speakers) = batch
+        else:
+            (audio, audio_len, text, text_len) = batch
+
+        spec, spec_lengths = self.audio_to_melspec_processor(audio, audio_len, linear_spec=True)
+
+        with autocast(enabled=True):
+            audio_pred, l_length, attn, ids_slice, text_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q) = self.net_g(
+                text, text_len, spec, spec_lengths, speakers
+            )
+
+        audio_pred = audio_pred.float()
+
+        audio_pred_mel, _ = self.audio_to_melspec_processor(audio_pred.squeeze(1), audio_len, linear_spec=False)
+
+        audio = slice_segments(audio.unsqueeze(1), ids_slice * self.cfg.n_window_stride, self._cfg.segment_size)
+        audio_mel, _ = self.audio_to_melspec_processor(audio.squeeze(1), audio_len, linear_spec=False)
+
+        with autocast(enabled=True):
+            y_d_hat_r, y_d_hat_g, _, _ = self.net_d(audio, audio_pred.detach())
+
+        with autocast(enabled=False):
+            loss_disc, losses_disc_r, losses_disc_g = self.disc_loss(
+                disc_real_outputs=y_d_hat_r, disc_generated_outputs=y_d_hat_g
+            )
+            loss_disc_all = loss_disc
+
+        # get optimizers
+        optim_g, optim_d = self.optimizers()
+
+        # train discriminator
+        optim_d.zero_grad()
+        self.manual_backward(loss_disc_all)
+        norm_d = clip_grad_value_(self.net_d.parameters(), None)
+        optim_d.step()
+
+        with autocast(enabled=True):
+            y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = self.net_d(audio, audio_pred)
+        # Generator
+        with autocast(enabled=False):
+            loss_dur = torch.sum(l_length.float())
+            loss_mel = F.l1_loss(audio_mel, audio_pred_mel) * self._cfg.c_mel
+            loss_kl = self.kl_loss(z_p=z_p, logs_q=logs_q, m_p=m_p, logs_p=logs_p, z_mask=z_mask) * self._cfg.c_kl
+            loss_fm = self.feat_matching_loss(fmap_r=fmap_r, fmap_g=fmap_g)
+            loss_gen, losses_gen = self.gen_loss(disc_outputs=y_d_hat_g)
+            loss_gen_all = loss_gen + loss_fm + loss_mel + loss_dur + loss_kl
+
+        # train generator
+        optim_g.zero_grad()
+        self.manual_backward(loss_gen_all)
+        norm_g = clip_grad_value_(self.net_g.parameters(), None)
+        optim_g.step()
+
+        schedulers = self.lr_schedulers()
+        if schedulers is not None:
+            sch1, sch2 = schedulers
+            if (
+                (self.trainer.is_last_batch and isinstance(sch1, torch.optim.lr_scheduler.ExponentialLR))
+                or isinstance(sch1, CosineAnnealing)
+            ):
+                sch1.step()
+                sch2.step()
+
+        metrics = {
+            "loss_gen": loss_gen,
+            "loss_fm": loss_fm,
+            "loss_mel * c_mel": loss_mel,
+            "loss_dur": loss_dur,
+            "loss_kl * c_kl": loss_kl,
+            "loss_gen_all": loss_gen_all,
+            "loss_disc_all": loss_disc_all,
+            "grad_gen": norm_g,
+            "grad_disc": norm_d,
+        }
+
+        for i, v in enumerate(losses_gen):
+            metrics[f"loss_gen_i_{i}"] = v
+
+        for i, v in enumerate(losses_disc_r):
+            metrics[f"loss_disc_r_{i}"] = v
+
+        for i, v in enumerate(losses_disc_g):
+            metrics[f"loss_disc_g_{i}"] = v
+
+        self.log_dict(metrics, on_step=True, sync_dist=True)
+
+    def validation_step(self, batch, batch_idx):
+        speakers = None
+        if self.cfg.n_speakers > 1:
+            (audio, audio_len, text, text_len, speakers) = batch
+        else:
+            (audio, audio_len, text, text_len) = batch
+
+        audio_pred, _, mask, *_ = self.net_g.infer(text, text_len, speakers, max_len=1000)
+
+        audio_pred = audio_pred.squeeze()
+        audio_pred_len = mask.sum([1, 2]).long() * self._cfg.validation_ds.dataset.hop_length
+
+        mel, mel_lengths = self.audio_to_melspec_processor(audio, audio_len)
+        audio_pred_mel, audio_pred_mel_len = self.audio_to_melspec_processor(audio_pred, audio_pred_len)
+
+        # plot audio once per epoch
+        if batch_idx == 0 and isinstance(self.logger, WandbLogger) and HAVE_WANDB:
+            logger = self.logger.experiment
+
+            specs = []
+            audios = []
+            specs += [
+                wandb.Image(
+                    plot_spectrogram_to_numpy(mel[0, :, : mel_lengths[0]].data.cpu().numpy()),
+                    caption="val_mel_target",
+                ),
+                wandb.Image(
+                    plot_spectrogram_to_numpy(audio_pred_mel[0, :, : audio_pred_mel_len[0]].data.cpu().numpy()),
+                    caption="val_mel_predicted",
+                ),
+            ]
+
+            audios += [
+                wandb.Audio(
+                    audio[0, : audio_len[0]].data.cpu().to(torch.float).numpy(),
+                    caption="val_wav_target",
+                    sample_rate=self._cfg.sample_rate,
+                ),
+                wandb.Audio(
+                    audio_pred[0, : audio_pred_len[0]].data.cpu().to(torch.float).numpy(),
+                    caption="val_wav_predicted",
+                    sample_rate=self._cfg.sample_rate,
+                ),
+            ]
+
+            logger.log({"specs": specs, "audios": audios})
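+
+    # Inference sketch (illustrative only; assumes a trained checkpoint saved
+    # as vits.nemo, restored via the standard NeMo restore_from API):
+    #   model = VitsModel.restore_from("vits.nemo").eval().cuda()
+    #   tokens = model.parse("Hello world!")
+    #   audio = model.convert_text_to_waveform(tokens=tokens)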
+
+    def _loader(self, cfg):
+        try:
+            _ = cfg['dataset']['manifest_filepath']
+        except omegaconf.errors.MissingMandatoryValue:
+            logging.warning("manifest_filepath was skipped. No dataset for this model.")
+            return None
+
+        dataset = instantiate(
+            cfg.dataset,
+            text_normalizer=self.normalizer,
+            text_normalizer_call_kwargs=self.text_normalizer_call_kwargs,
+            text_tokenizer=self.tokenizer,
+        )
+        return torch.utils.data.DataLoader(  # noqa
+            dataset=dataset, collate_fn=dataset.collate_fn, **cfg.dataloader_params,
+        )
+
+    def train_dataloader(self):
+        # default used by the Trainer
+        dataset = instantiate(
+            self.cfg.train_ds.dataset,
+            text_normalizer=self.normalizer,
+            text_normalizer_call_kwargs=self.text_normalizer_call_kwargs,
+            text_tokenizer=self.tokenizer,
+        )
+
+        train_sampler = DistributedBucketSampler(dataset, **self.cfg.train_ds.batch_sampler)
+
+        dataloader = torch.utils.data.DataLoader(
+            dataset, collate_fn=dataset.collate_fn, batch_sampler=train_sampler, **self.cfg.train_ds.dataloader_params,
+        )
+        return dataloader
+
+    def setup_training_data(self, cfg):
+        self._train_dl = self._loader(cfg)
+
+    def setup_validation_data(self, cfg):
+        self._validation_dl = self._loader(cfg)
+
+    def setup_test_data(self, cfg):
+        """Omitted."""
+        pass
+
+    @classmethod
+    def list_available_models(cls) -> 'List[PretrainedModelInfo]':
+        list_of_models = []
+        # TODO: List available models??
+        return list_of_models
+
+    def convert_text_to_waveform(self, *, tokens, sid=None):
+        return self(tokens, sid=sid)[0].squeeze(1)
diff --git a/nemo/collections/tts/modules/monotonic_align/__init__.py b/nemo/collections/tts/modules/monotonic_align/__init__.py
new file mode 100644
index 000000000000..2f8f95d68eb3
--- /dev/null
+++ b/nemo/collections/tts/modules/monotonic_align/__init__.py
@@ -0,0 +1,104 @@
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# MIT License
+#
+# Copyright (c) 2021 Jaehyeon Kim
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
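+
+# Usage sketch (illustrative): maximum_path below takes a score tensor
+# neg_cent and a binary mask, both of shape [b, t_t, t_s], and returns a 0/1
+# monotonic alignment path of the same shape, with one active t_s position
+# selected per valid t_t step:
+#
+#   neg_cent = torch.randn(2, 10, 6)
+#   mask = torch.ones(2, 10, 6)
+#   path = maximum_path(neg_cent, mask)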
+
+import numba
+import numpy as np
+import torch
+
+# from .numba_core import maximum_path_c
+
+
+def maximum_path(neg_cent, mask):
+    """ Numba optimized version.
+    neg_cent: [b, t_t, t_s]
+    mask: [b, t_t, t_s]
+    """
+    device = neg_cent.device
+    dtype = neg_cent.dtype
+    neg_cent = neg_cent.data.cpu().numpy().astype(np.float32)
+    path = np.zeros(neg_cent.shape, dtype=np.int32)
+
+    t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32)
+    t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32)
+    maximum_path_c(path, neg_cent, t_t_max, t_s_max)
+    return torch.from_numpy(path).to(device=device, dtype=dtype)
+
+
+@numba.jit(nopython=True, boundscheck=False, parallel=True)
+def maximum_path_each(path, value, t_y: int, t_x: int, max_neg_val=-1e9):
+    """
+    Args:
+        path: int32[:, :]
+        value: float32[:, :]
+        t_y: int
+        t_x: int
+        max_neg_val: float
+    """
+    index: int = t_x - 1
+
+    for y in range(t_y):
+        for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
+            if x == y:
+                v_cur = max_neg_val
+            else:
+                v_cur = value[y - 1, x]
+            if x == 0:
+                if y == 0:
+                    v_prev = 0.0
+                else:
+                    v_prev = max_neg_val
+            else:
+                v_prev = value[y - 1, x - 1]
+            value[y, x] += max(v_prev, v_cur)
+
+    for y in range(t_y - 1, -1, -1):
+        path[y, index] = 1
+        if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]):
+            index = index - 1
+
+
+@numba.jit(nopython=True, boundscheck=False, parallel=True)
+def maximum_path_c(paths, values, t_ys, t_xs):
+    """
+    Args:
+        paths: int32[:, :, :]
+        values: float32[:, :, :]
+        t_ys: int[:]
+        t_xs: int[:]
+    """
+    b: int = paths.shape[0]
+    for i in numba.prange(b):
+        maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i])
diff --git a/nemo/collections/tts/modules/monotonic_align/core.c b/nemo/collections/tts/modules/monotonic_align/core.c
new file mode 100644
index 000000000000..2e21659560ca
--- /dev/null
+++ b/nemo/collections/tts/modules/monotonic_align/core.c
@@ -0,0 +1,21336 @@
+// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// MIT License
+//
+// Copyright (c) 2021 Jaehyeon Kim
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+
+/* Generated by Cython 0.29.21 */
+
+/* BEGIN: Cython Metadata
+{
+    "distutils": {
+        "name": "monotonic_align.core",
+        "sources": [
+            "core.pyx"
+        ]
+    },
+    "module_name": "monotonic_align.core"
+}
+END: Cython Metadata */
+
+#define PY_SSIZE_T_CLEAN
+#include "Python.h"
+#ifndef Py_PYTHON_H
+    #error Python headers needed to compile C extensions, please install development version of Python.
+#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
+    #error Cython requires Python 2.6+ or Python 3.3+.
+#else
+#define CYTHON_ABI "0_29_21"
+#define CYTHON_HEX_VERSION 0x001D15F0
+#define CYTHON_FUTURE_DIVISION 0
+#include <stddef.h>
+#ifndef offsetof
+  #define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
+#endif
+#if !defined(WIN32) && !defined(MS_WINDOWS)
+  #ifndef __stdcall
+    #define __stdcall
+  #endif
+  #ifndef __cdecl
+    #define __cdecl
+  #endif
+  #ifndef __fastcall
+    #define __fastcall
+  #endif
+#endif
+#ifndef DL_IMPORT
+  #define DL_IMPORT(t) t
+#endif
+#ifndef DL_EXPORT
+  #define DL_EXPORT(t) t
+#endif
+#define __PYX_COMMA ,
+#ifndef HAVE_LONG_LONG
+  #if PY_VERSION_HEX >= 0x02070000
+    #define HAVE_LONG_LONG
+  #endif
+#endif
+#ifndef PY_LONG_LONG
+  #define PY_LONG_LONG LONG_LONG
+#endif
+#ifndef Py_HUGE_VAL
+  #define Py_HUGE_VAL HUGE_VAL
+#endif
+#ifdef PYPY_VERSION
+  #define CYTHON_COMPILING_IN_PYPY 1
+  #define CYTHON_COMPILING_IN_PYSTON 0
+  #define CYTHON_COMPILING_IN_CPYTHON 0
+  #undef CYTHON_USE_TYPE_SLOTS
+  #define CYTHON_USE_TYPE_SLOTS 0
+  #undef CYTHON_USE_PYTYPE_LOOKUP
+  #define CYTHON_USE_PYTYPE_LOOKUP 0
+  #if PY_VERSION_HEX < 0x03050000
+    #undef CYTHON_USE_ASYNC_SLOTS
+    #define CYTHON_USE_ASYNC_SLOTS 0
+  #elif !defined(CYTHON_USE_ASYNC_SLOTS)
+    #define CYTHON_USE_ASYNC_SLOTS 1
+  #endif
+  #undef CYTHON_USE_PYLIST_INTERNALS
+  #define CYTHON_USE_PYLIST_INTERNALS 0
+  #undef CYTHON_USE_UNICODE_INTERNALS
+  #define CYTHON_USE_UNICODE_INTERNALS 0
+  #undef CYTHON_USE_UNICODE_WRITER
+  #define CYTHON_USE_UNICODE_WRITER 0
+  #undef CYTHON_USE_PYLONG_INTERNALS
+  #define CYTHON_USE_PYLONG_INTERNALS 0
+  #undef CYTHON_AVOID_BORROWED_REFS
+  #define CYTHON_AVOID_BORROWED_REFS 1
+  #undef CYTHON_ASSUME_SAFE_MACROS
+  #define CYTHON_ASSUME_SAFE_MACROS 0
+  #undef CYTHON_UNPACK_METHODS
+  #define CYTHON_UNPACK_METHODS 0
+  #undef CYTHON_FAST_THREAD_STATE
+  #define CYTHON_FAST_THREAD_STATE 0
+  #undef CYTHON_FAST_PYCALL
+  #define CYTHON_FAST_PYCALL 0
+  #undef CYTHON_PEP489_MULTI_PHASE_INIT
+  #define CYTHON_PEP489_MULTI_PHASE_INIT 0
+  #undef CYTHON_USE_TP_FINALIZE
+  #define CYTHON_USE_TP_FINALIZE 0
+  #undef CYTHON_USE_DICT_VERSIONS
+  #define CYTHON_USE_DICT_VERSIONS 0
+  #undef CYTHON_USE_EXC_INFO_STACK
+  #define CYTHON_USE_EXC_INFO_STACK 0
+#elif defined(PYSTON_VERSION)
+  #define CYTHON_COMPILING_IN_PYPY 0
+  #define CYTHON_COMPILING_IN_PYSTON 1
+  #define CYTHON_COMPILING_IN_CPYTHON 0
+  #ifndef CYTHON_USE_TYPE_SLOTS
+    #define CYTHON_USE_TYPE_SLOTS 1
+  #endif
+  #undef CYTHON_USE_PYTYPE_LOOKUP
+  #define CYTHON_USE_PYTYPE_LOOKUP 0
+  #undef CYTHON_USE_ASYNC_SLOTS
+  #define CYTHON_USE_ASYNC_SLOTS 0
+  #undef CYTHON_USE_PYLIST_INTERNALS
+  #define CYTHON_USE_PYLIST_INTERNALS 0
+  #ifndef CYTHON_USE_UNICODE_INTERNALS
+    #define CYTHON_USE_UNICODE_INTERNALS 1
+  #endif
+  #undef CYTHON_USE_UNICODE_WRITER
+ #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 +#else + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 1 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) + #define CYTHON_USE_PYTYPE_LOOKUP 1 + #endif + #if PY_MAJOR_VERSION < 3 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #elif !defined(CYTHON_USE_PYLONG_INTERNALS) + #define CYTHON_USE_PYLONG_INTERNALS 1 + #endif + #ifndef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 1 + #endif + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #if PY_VERSION_HEX < 0x030300F0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #elif !defined(CYTHON_USE_UNICODE_WRITER) + #define CYTHON_USE_UNICODE_WRITER 1 + #endif + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #ifndef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 1 + #endif + #ifndef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 1 + #endif + #ifndef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) + #endif + #ifndef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) + #endif + #ifndef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) + #endif + #ifndef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) + #endif +#endif +#if !defined(CYTHON_FAST_PYCCALL) +#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) +#endif +#if CYTHON_USE_PYLONG_INTERNALS + #include "longintrepr.h" + #undef SHIFT + #undef BASE + #undef MASK + #ifdef SIZEOF_VOID_P + enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; + #endif +#endif +#ifndef __has_attribute + #define __has_attribute(x) 0 +#endif +#ifndef __has_cpp_attribute + #define __has_cpp_attribute(x) 0 +#endif +#ifndef CYTHON_RESTRICT + #if defined(__GNUC__) + #define CYTHON_RESTRICT __restrict__ + #elif defined(_MSC_VER) && _MSC_VER >= 1400 + #define CYTHON_RESTRICT __restrict + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_RESTRICT restrict + #else + 
#define CYTHON_RESTRICT + #endif +#endif +#ifndef CYTHON_UNUSED +# if defined(__GNUC__) +# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +#endif +#ifndef CYTHON_MAYBE_UNUSED_VAR +# if defined(__cplusplus) + template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } +# else +# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) +# endif +#endif +#ifndef CYTHON_NCP_UNUSED +# if CYTHON_COMPILING_IN_CPYTHON +# define CYTHON_NCP_UNUSED +# else +# define CYTHON_NCP_UNUSED CYTHON_UNUSED +# endif +#endif +#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) +#ifdef _MSC_VER + #ifndef _MSC_STDINT_H_ + #if _MSC_VER < 1300 + typedef unsigned char uint8_t; + typedef unsigned int uint32_t; + #else + typedef unsigned __int8 uint8_t; + typedef unsigned __int32 uint32_t; + #endif + #endif +#else + #include <stdint.h> +#endif +#ifndef CYTHON_FALLTHROUGH + #if defined(__cplusplus) && __cplusplus >= 201103L + #if __has_cpp_attribute(fallthrough) + #define CYTHON_FALLTHROUGH [[fallthrough]] + #elif __has_cpp_attribute(clang::fallthrough) + #define CYTHON_FALLTHROUGH [[clang::fallthrough]] + #elif __has_cpp_attribute(gnu::fallthrough) + #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] + #endif + #endif + #ifndef CYTHON_FALLTHROUGH + #if __has_attribute(fallthrough) + #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) + #else + #define CYTHON_FALLTHROUGH + #endif + #endif + #if defined(__clang__ ) && defined(__apple_build_version__) + #if __apple_build_version__ < 7000000 + #undef CYTHON_FALLTHROUGH + #define CYTHON_FALLTHROUGH + #endif + #endif +#endif + +#ifndef CYTHON_INLINE + #if defined(__clang__) + #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) + #elif defined(__GNUC__) + #define CYTHON_INLINE __inline__ + #elif defined(_MSC_VER) + #define CYTHON_INLINE __inline + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_INLINE inline + #else + #define CYTHON_INLINE + #endif +#endif + +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) + #define Py_OptimizeFlag 0 +#endif +#define __PYX_BUILD_PY_SSIZE_T "n" +#define CYTHON_FORMAT_SSIZE_T "z" +#if PY_MAJOR_VERSION < 3 + #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) + #define __Pyx_DefaultClassType PyClass_Type +#else + #define __Pyx_BUILTIN_MODULE_NAME "builtins" +#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2 + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) +#else + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) +#endif + #define __Pyx_DefaultClassType PyType_Type +#endif +#ifndef Py_TPFLAGS_CHECKTYPES + #define Py_TPFLAGS_CHECKTYPES 0 +#endif +#ifndef Py_TPFLAGS_HAVE_INDEX + #define Py_TPFLAGS_HAVE_INDEX 0 +#endif +#ifndef Py_TPFLAGS_HAVE_NEWBUFFER + #define Py_TPFLAGS_HAVE_NEWBUFFER 0 +#endif +#ifndef Py_TPFLAGS_HAVE_FINALIZE + #define
Py_TPFLAGS_HAVE_FINALIZE 0 +#endif +#ifndef METH_STACKLESS + #define METH_STACKLESS 0 +#endif +#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) + #ifndef METH_FASTCALL + #define METH_FASTCALL 0x80 + #endif + typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); + typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, + Py_ssize_t nargs, PyObject *kwnames); +#else + #define __Pyx_PyCFunctionFast _PyCFunctionFast + #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords +#endif +#if CYTHON_FAST_PYCCALL +#define __Pyx_PyFastCFunction_Check(func)\ + ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) +#else +#define __Pyx_PyFastCFunction_Check(func) 0 +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) + #define PyObject_Malloc(s) PyMem_Malloc(s) + #define PyObject_Free(p) PyMem_Free(p) + #define PyObject_Realloc(p) PyMem_Realloc(p) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 + #define PyMem_RawMalloc(n) PyMem_Malloc(n) + #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) + #define PyMem_RawFree(p) PyMem_Free(p) +#endif +#if CYTHON_COMPILING_IN_PYSTON + #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) +#else + #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) +#endif +#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#elif PY_VERSION_HEX >= 0x03060000 + #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() +#elif PY_VERSION_HEX >= 0x03000000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#else + #define __Pyx_PyThreadState_Current _PyThreadState_Current +#endif +#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) +#include "pythread.h" +#define Py_tss_NEEDS_INIT 0 +typedef int Py_tss_t; +static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { + *key = PyThread_create_key(); + return 0; +} +static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { + Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); + *key = Py_tss_NEEDS_INIT; + return key; +} +static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { + PyObject_Free(key); +} +static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { + return *key != Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { + PyThread_delete_key(*key); + *key = Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { + return PyThread_set_key_value(*key, value); +} +static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { + return PyThread_get_key_value(*key); +} +#endif +#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) +#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) +#else +#define __Pyx_PyDict_NewPresized(n) PyDict_New() +#endif +#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION + #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) +#else + #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS +#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) +#else +#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) +#endif +#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) + #define CYTHON_PEP393_ENABLED 1 + #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ + 0 : _PyUnicode_Ready((PyObject *)(op))) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) + #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) + #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) + #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) + #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) + #else + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) + #endif +#else + #define CYTHON_PEP393_ENABLED 0 + #define PyUnicode_1BYTE_KIND 1 + #define PyUnicode_2BYTE_KIND 2 + #define PyUnicode_4BYTE_KIND 4 + #define __Pyx_PyUnicode_READY(op) (0) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) + #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) + #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) + #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) +#endif +#if CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) +#else + #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ + PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) + #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) + #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) + #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) +#endif +#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) +#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) +#else + #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) +#endif +#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) + #define PyObject_ASCII(o) PyObject_Repr(o) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBaseString_Type PyUnicode_Type + #define PyStringObject PyUnicodeObject + #define PyString_Type PyUnicode_Type + #define PyString_Check PyUnicode_Check + #define PyString_CheckExact PyUnicode_CheckExact +#ifndef PyObject_Unicode + #define PyObject_Unicode PyObject_Str +#endif +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) + #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) +#else + #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) + #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) +#endif +#ifndef PySet_CheckExact + #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) +#endif +#if PY_VERSION_HEX >= 0x030900A4 + #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) + #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) +#else + #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) + #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) +#endif +#if CYTHON_ASSUME_SAFE_MACROS + #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) +#else + #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyIntObject PyLongObject + #define PyInt_Type PyLong_Type + #define PyInt_Check(op) PyLong_Check(op) + #define PyInt_CheckExact(op) PyLong_CheckExact(op) + #define PyInt_FromString PyLong_FromString + #define PyInt_FromUnicode PyLong_FromUnicode + #define PyInt_FromLong PyLong_FromLong + #define PyInt_FromSize_t PyLong_FromSize_t + #define PyInt_FromSsize_t PyLong_FromSsize_t + #define PyInt_AsLong PyLong_AsLong + #define PyInt_AS_LONG PyLong_AS_LONG + #define PyInt_AsSsize_t PyLong_AsSsize_t + #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask + #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask + #define PyNumber_Int PyNumber_Long +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBoolObject PyLongObject +#endif +#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY + #ifndef PyUnicode_InternFromString + #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) + #endif +#endif +#if PY_VERSION_HEX < 0x030200A4 + typedef long Py_hash_t; + #define __Pyx_PyInt_FromHash_t PyInt_FromLong + #define __Pyx_PyInt_AsHash_t PyInt_AsLong +#else + #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t + #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func)) +#else + #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) +#endif +#if CYTHON_USE_ASYNC_SLOTS + #if PY_VERSION_HEX >= 0x030500B1 + #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods + #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) + #else + #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) + #endif +#else + #define __Pyx_PyType_AsAsync(obj) NULL +#endif +#ifndef __Pyx_PyAsyncMethodsStruct + typedef struct { + unaryfunc am_await; + unaryfunc am_aiter; + unaryfunc am_anext; + } __Pyx_PyAsyncMethodsStruct; +#endif + +#if defined(WIN32) || defined(MS_WINDOWS) + #define _USE_MATH_DEFINES +#endif +#include <math.h> +#ifdef NAN +#define __PYX_NAN() ((float) NAN) +#else +static CYTHON_INLINE float __PYX_NAN() { + float value; + memset(&value, 0xFF, sizeof(value)); + return value; +} +#endif +#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) +#define __Pyx_truncl trunc +#else +#define __Pyx_truncl truncl +#endif + +#define __PYX_MARK_ERR_POS(f_index, lineno) \ + { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } +#define __PYX_ERR(f_index, lineno, Ln_error) \ + { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } + +#ifndef __PYX_EXTERN_C + #ifdef __cplusplus + #define __PYX_EXTERN_C extern "C" + #else + #define __PYX_EXTERN_C extern + #endif +#endif + +#define __PYX_HAVE__monotonic_align__core +#define __PYX_HAVE_API__monotonic_align__core +/* Early includes */ +#include "pythread.h" +#include <string.h> +#include <stdio.h> +#include <stdlib.h> +#include "pystate.h" +#ifdef _OPENMP +#include <omp.h> +#endif /* _OPENMP */ + +#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) +#define CYTHON_WITHOUT_ASSERTIONS +#endif + +typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; + const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; + +#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) +#define __PYX_DEFAULT_STRING_ENCODING "" +#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString +#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#define __Pyx_uchar_cast(c) ((unsigned char)c) +#define __Pyx_long_cast(x) ((long)x) +#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ + (sizeof(type) < sizeof(Py_ssize_t)) ||\ + (sizeof(type) > sizeof(Py_ssize_t) &&\ + likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX) &&\ + (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ + v == (type)PY_SSIZE_T_MIN))) ||\ + (sizeof(type) == sizeof(Py_ssize_t) &&\ + (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX))) ) +static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { + return (size_t) i < (size_t) limit; +} +#if defined (__cplusplus) && __cplusplus >= 201103L + #include <cstdlib> + #define __Pyx_sst_abs(value) std::abs(value) +#elif SIZEOF_INT >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) abs(value) +#elif SIZEOF_LONG >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) labs(value) +#elif defined (_MSC_VER) + #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) +#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define __Pyx_sst_abs(value) llabs(value) +#elif defined (__GNUC__) + 
#define __Pyx_sst_abs(value) __builtin_llabs(value) +#else + #define __Pyx_sst_abs(value) ((value<0) ? -value : value) +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); +#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) +#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) +#define __Pyx_PyBytes_FromString PyBytes_FromString +#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); +#if PY_MAJOR_VERSION < 3 + #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#else + #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize +#endif +#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) +#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) +#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) +#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) +#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) +static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { + const Py_UNICODE *u_end = u; + while (*u_end++) ; + return (size_t)(u_end - u - 1); +} +#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) +#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode +#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode +#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) +#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); +static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); +#define __Pyx_PySequence_Tuple(obj)\ + (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); +#if CYTHON_ASSUME_SAFE_MACROS +#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? 
PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) +#else +#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) +#endif +#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) +#else +#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) +#endif +#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII +static int __Pyx_sys_getdefaultencoding_not_ascii; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + PyObject* ascii_chars_u = NULL; + PyObject* ascii_chars_b = NULL; + const char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + if (strcmp(default_encoding_c, "ascii") == 0) { + __Pyx_sys_getdefaultencoding_not_ascii = 0; + } else { + char ascii_chars[128]; + int c; + for (c = 0; c < 128; c++) { + ascii_chars[c] = c; + } + __Pyx_sys_getdefaultencoding_not_ascii = 1; + ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); + if (!ascii_chars_u) goto bad; + ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); + if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { + PyErr_Format( + PyExc_ValueError, + "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", + default_encoding_c); + goto bad; + } + Py_DECREF(ascii_chars_u); + Py_DECREF(ascii_chars_b); + } + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + Py_XDECREF(ascii_chars_u); + Py_XDECREF(ascii_chars_b); + return -1; +} +#endif +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) +#else +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +static char* __PYX_DEFAULT_STRING_ENCODING; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); + if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; + strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + return -1; +} +#endif +#endif + + +/* Test for GCC > 2.95 */ +#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) + #define likely(x) __builtin_expect(!!(x), 1) + #define unlikely(x) __builtin_expect(!!(x), 0) +#else /* !__GNUC__ or GCC < 2.95 */ + #define likely(x) (x) + #define unlikely(x) (x) +#endif /* __GNUC__ */ 
+static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } + +static PyObject *__pyx_m = NULL; +static PyObject *__pyx_d; +static PyObject *__pyx_b; +static PyObject *__pyx_cython_runtime = NULL; +static PyObject *__pyx_empty_tuple; +static PyObject *__pyx_empty_bytes; +static PyObject *__pyx_empty_unicode; +static int __pyx_lineno; +static int __pyx_clineno = 0; +static const char * __pyx_cfilenm= __FILE__; +static const char *__pyx_filename; + + +static const char *__pyx_f[] = { + "core.pyx", + "stringsource", +}; +/* NoFastGil.proto */ +#define __Pyx_PyGILState_Ensure PyGILState_Ensure +#define __Pyx_PyGILState_Release PyGILState_Release +#define __Pyx_FastGIL_Remember() +#define __Pyx_FastGIL_Forget() +#define __Pyx_FastGilFuncInit() + +/* MemviewSliceStruct.proto */ +struct __pyx_memoryview_obj; +typedef struct { + struct __pyx_memoryview_obj *memview; + char *data; + Py_ssize_t shape[8]; + Py_ssize_t strides[8]; + Py_ssize_t suboffsets[8]; +} __Pyx_memviewslice; +#define __Pyx_MemoryView_Len(m) (m.shape[0]) + +/* Atomics.proto */ +#include <pythread.h> +#ifndef CYTHON_ATOMICS + #define CYTHON_ATOMICS 1 +#endif +#define __pyx_atomic_int_type int +#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\ + (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\ + !defined(__i386__) + #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1) + #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1) + #ifdef __PYX_DEBUG_ATOMICS + #warning "Using GNU atomics" + #endif +#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0 + #include <Windows.h> + #undef __pyx_atomic_int_type + #define __pyx_atomic_int_type LONG + #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value) + #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value) + #ifdef __PYX_DEBUG_ATOMICS + #pragma message ("Using MSVC atomics") + #endif +#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0 + #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value) + #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value) + #ifdef __PYX_DEBUG_ATOMICS + #warning "Using Intel atomics" + #endif +#else + #undef CYTHON_ATOMICS + #define CYTHON_ATOMICS 0 + #ifdef __PYX_DEBUG_ATOMICS + #warning "Not using atomics" + #endif +#endif +typedef volatile __pyx_atomic_int_type __pyx_atomic_int; +#if CYTHON_ATOMICS + #define __pyx_add_acquisition_count(memview)\ + __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) + #define __pyx_sub_acquisition_count(memview)\ + __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) +#else + #define __pyx_add_acquisition_count(memview)\ + __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) + #define __pyx_sub_acquisition_count(memview)\ + __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) +#endif + +/* ForceInitThreads.proto */ +#ifndef __PYX_FORCE_INIT_THREADS + #define __PYX_FORCE_INIT_THREADS 0 +#endif + +/* BufferFormatStructs.proto */ +#define IS_UNSIGNED(type) (((type) -1) > 0) +struct __Pyx_StructField_; +#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) +typedef struct { + const char* name; + struct __Pyx_StructField_* fields; + size_t size; + size_t arraysize[8]; + int ndim; + char typegroup; + char is_unsigned; + int flags; +} __Pyx_TypeInfo; +typedef struct __Pyx_StructField_ { + __Pyx_TypeInfo* type; + const char* name; + size_t offset; +} 
__Pyx_StructField; +typedef struct { + __Pyx_StructField* field; + size_t parent_offset; +} __Pyx_BufFmt_StackElem; +typedef struct { + __Pyx_StructField root; + __Pyx_BufFmt_StackElem* head; + size_t fmt_offset; + size_t new_count, enc_count; + size_t struct_alignment; + int is_complex; + char enc_type; + char new_packmode; + char enc_packmode; + char is_valid_array; +} __Pyx_BufFmt_Context; + + +/*--- Type declarations ---*/ +struct __pyx_array_obj; +struct __pyx_MemviewEnum_obj; +struct __pyx_memoryview_obj; +struct __pyx_memoryviewslice_obj; +struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each; + +/* "monotonic_align/core.pyx":7 + * @cython.boundscheck(False) + * @cython.wraparound(False) + * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< + * cdef int x + * cdef int y + */ +struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each { + int __pyx_n; + float max_neg_val; +}; + +/* "View.MemoryView":105 + * + * @cname("__pyx_array") + * cdef class array: # <<<<<<<<<<<<<< + * + * cdef: + */ +struct __pyx_array_obj { + PyObject_HEAD + struct __pyx_vtabstruct_array *__pyx_vtab; + char *data; + Py_ssize_t len; + char *format; + int ndim; + Py_ssize_t *_shape; + Py_ssize_t *_strides; + Py_ssize_t itemsize; + PyObject *mode; + PyObject *_format; + void (*callback_free_data)(void *); + int free_data; + int dtype_is_object; +}; + + +/* "View.MemoryView":279 + * + * @cname('__pyx_MemviewEnum') + * cdef class Enum(object): # <<<<<<<<<<<<<< + * cdef object name + * def __init__(self, name): + */ +struct __pyx_MemviewEnum_obj { + PyObject_HEAD + PyObject *name; +}; + + +/* "View.MemoryView":330 + * + * @cname('__pyx_memoryview') + * cdef class memoryview(object): # <<<<<<<<<<<<<< + * + * cdef object obj + */ +struct __pyx_memoryview_obj { + PyObject_HEAD + struct __pyx_vtabstruct_memoryview *__pyx_vtab; + PyObject *obj; + PyObject *_size; + PyObject *_array_interface; + PyThread_type_lock lock; + __pyx_atomic_int acquisition_count[2]; + __pyx_atomic_int *acquisition_count_aligned_p; + Py_buffer view; + int flags; + int dtype_is_object; + __Pyx_TypeInfo *typeinfo; +}; + + +/* "View.MemoryView":965 + * + * @cname('__pyx_memoryviewslice') + * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< + * "Internal class for passing memoryview slices to Python" + * + */ +struct __pyx_memoryviewslice_obj { + struct __pyx_memoryview_obj __pyx_base; + __Pyx_memviewslice from_slice; + PyObject *from_object; + PyObject *(*to_object_func)(char *); + int (*to_dtype_func)(char *, PyObject *); +}; + + + +/* "View.MemoryView":105 + * + * @cname("__pyx_array") + * cdef class array: # <<<<<<<<<<<<<< + * + * cdef: + */ + +struct __pyx_vtabstruct_array { + PyObject *(*get_memview)(struct __pyx_array_obj *); +}; +static struct __pyx_vtabstruct_array *__pyx_vtabptr_array; + + +/* "View.MemoryView":330 + * + * @cname('__pyx_memoryview') + * cdef class memoryview(object): # <<<<<<<<<<<<<< + * + * cdef object obj + */ + +struct __pyx_vtabstruct_memoryview { + char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); + PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); + PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); + PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); + PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); + PyObject 
*(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); + PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); +}; +static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; + + +/* "View.MemoryView":965 + * + * @cname('__pyx_memoryviewslice') + * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< + * "Internal class for passing memoryview slices to Python" + * + */ + +struct __pyx_vtabstruct__memoryviewslice { + struct __pyx_vtabstruct_memoryview __pyx_base; +}; +static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; + +/* --- Runtime support code (head) --- */ +/* Refnanny.proto */ +#ifndef CYTHON_REFNANNY + #define CYTHON_REFNANNY 0 +#endif +#if CYTHON_REFNANNY + typedef struct { + void (*INCREF)(void*, PyObject*, int); + void (*DECREF)(void*, PyObject*, int); + void (*GOTREF)(void*, PyObject*, int); + void (*GIVEREF)(void*, PyObject*, int); + void* (*SetupContext)(const char*, int, const char*); + void (*FinishContext)(void**); + } __Pyx_RefNannyAPIStruct; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); + #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; +#ifdef WITH_THREAD + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + if (acquire_gil) {\ + PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + PyGILState_Release(__pyx_gilstate_save);\ + } else {\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + } +#else + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) +#endif + #define __Pyx_RefNannyFinishContext()\ + __Pyx_RefNanny->FinishContext(&__pyx_refnanny) + #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) + #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) + #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) + #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) +#else + #define __Pyx_RefNannyDeclarations + #define __Pyx_RefNannySetupContext(name, acquire_gil) + #define __Pyx_RefNannyFinishContext() + #define __Pyx_INCREF(r) Py_INCREF(r) + #define __Pyx_DECREF(r) Py_DECREF(r) + #define __Pyx_GOTREF(r) + #define __Pyx_GIVEREF(r) + #define __Pyx_XINCREF(r) Py_XINCREF(r) + #define __Pyx_XDECREF(r) Py_XDECREF(r) + #define __Pyx_XGOTREF(r) + #define __Pyx_XGIVEREF(r) +#endif +#define __Pyx_XDECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_XDECREF(tmp);\ + } while (0) +#define __Pyx_DECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_DECREF(tmp);\ + } while (0) +#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) +#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) + +/* PyObjectGetAttrStr.proto */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* 
__Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) +#endif + +/* GetBuiltinName.proto */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name); + +/* MemviewSliceInit.proto */ +#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d +#define __Pyx_MEMVIEW_DIRECT 1 +#define __Pyx_MEMVIEW_PTR 2 +#define __Pyx_MEMVIEW_FULL 4 +#define __Pyx_MEMVIEW_CONTIG 8 +#define __Pyx_MEMVIEW_STRIDED 16 +#define __Pyx_MEMVIEW_FOLLOW 32 +#define __Pyx_IS_C_CONTIG 1 +#define __Pyx_IS_F_CONTIG 2 +static int __Pyx_init_memviewslice( + struct __pyx_memoryview_obj *memview, + int ndim, + __Pyx_memviewslice *memviewslice, + int memview_is_new_reference); +static CYTHON_INLINE int __pyx_add_acquisition_count_locked( + __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); +static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( + __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); +#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) +#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) +#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) +#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) +static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); +static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); + +/* RaiseArgTupleInvalid.proto */ +static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, + Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); + +/* RaiseDoubleKeywords.proto */ +static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); + +/* ParseKeywords.proto */ +static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ + PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ + const char* function_name); + +/* None.proto */ +static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); + +/* ArgTypeTest.proto */ +#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ + ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 
1 :\ + __Pyx__ArgTypeTest(obj, type, name, exact)) +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); + +/* PyObjectCall.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); +#else +#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) +#endif + +/* PyThreadStateGet.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; +#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; +#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type +#else +#define __Pyx_PyThreadState_declare +#define __Pyx_PyThreadState_assign +#define __Pyx_PyErr_Occurred() PyErr_Occurred() +#endif + +/* PyErrFetchRestore.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) +#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) +#else +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#endif +#else +#define __Pyx_PyErr_Clear() PyErr_Clear() +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) +#endif + +/* RaiseException.proto */ +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); + +/* PyCFunctionFastCall.proto */ +#if CYTHON_FAST_PYCCALL +static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); +#else +#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) +#endif + +/* PyFunctionFastCall.proto */ +#if CYTHON_FAST_PYCALL +#define __Pyx_PyFunction_FastCall(func, args, nargs)\ + __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) +#if 1 || PY_VERSION_HEX < 0x030600B1 +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); +#else +#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) +#endif +#define __Pyx_BUILD_ASSERT_EXPR(cond)\ + (sizeof(char [1 - 2*!(cond)]) - 1) +#ifndef Py_MEMBER_SIZE +#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) +#endif + static size_t __pyx_pyframe_localsplus_offset = 0; + #include "frameobject.h" + #define __Pxy_PyFrame_Initialize_Offsets()\ + 
((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ + (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) + #define __Pyx_PyFrame_GetLocalsplus(frame)\ + (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) +#endif + +/* PyObjectCall2Args.proto */ +static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); + +/* PyObjectCallMethO.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); +#endif + +/* PyObjectCallOneArg.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); + +/* IncludeStringH.proto */ +#include <string.h> + +/* BytesEquals.proto */ +static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); + +/* UnicodeEquals.proto */ +static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); + +/* StrEquals.proto */ +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals +#else +#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals +#endif + +/* None.proto */ +static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t); + +/* UnaryNegOverflows.proto */ +#define UNARY_NEG_WOULD_OVERFLOW(x)\ + (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) + +static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ +static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/ +/* GetAttr.proto */ +static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); + +/* GetItemInt.proto */ +#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ + (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ + __Pyx_GetItemInt_Generic(o, to_py_func(i)))) +#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ + (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, + int wraparound, int boundscheck); +#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ + (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, + int wraparound, int boundscheck); +static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, + int is_list, int wraparound, int boundscheck); + +/* ObjectGetItem.proto */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); +#else +#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) +#endif + +/* decode_c_string_utf16.proto */ +static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) { + int byteorder = 0; + return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); +} +static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) { + int byteorder = -1; + return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); +} +static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) { + int byteorder = 1; + return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); +} + +/* decode_c_string.proto */ +static CYTHON_INLINE PyObject* __Pyx_decode_c_string( + const char* cstring, Py_ssize_t start, Py_ssize_t stop, + const char* encoding, const char* errors, + PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); + +/* PyErrExceptionMatches.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) +static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); +#else +#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) +#endif + +/* GetAttr3.proto */ +static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); + +/* PyDictVersioning.proto */ +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) +#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ + (version_var) = __PYX_GET_DICT_VERSION(dict);\ + (cache_var) = (value); +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ + static PY_UINT64_T __pyx_dict_version = 0;\ + static PyObject *__pyx_dict_cached_value = NULL;\ + if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ + (VAR) = __pyx_dict_cached_value;\ + } else {\ + (VAR) = __pyx_dict_cached_value = (LOOKUP);\ + __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ + }\ +} +static 
CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); +#else +#define __PYX_GET_DICT_VERSION(dict) (0) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); +#endif + +/* GetModuleGlobalName.proto */ +#if CYTHON_USE_DICT_VERSIONS +#define __Pyx_GetModuleGlobalName(var, name) {\ + static PY_UINT64_T __pyx_dict_version = 0;\ + static PyObject *__pyx_dict_cached_value = NULL;\ + (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ + (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ + __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ +} +#define __Pyx_GetModuleGlobalNameUncached(var, name) {\ + PY_UINT64_T __pyx_dict_version;\ + PyObject *__pyx_dict_cached_value;\ + (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ +} +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); +#else +#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) +#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); +#endif + +/* RaiseTooManyValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); + +/* RaiseNeedMoreValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); + +/* RaiseNoneIterError.proto */ +static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); + +/* ExtTypeTest.proto */ +static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); + +/* GetTopmostException.proto */ +#if CYTHON_USE_EXC_INFO_STACK +static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); +#endif + +/* SaveResetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +#else +#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) +#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) +#endif + +/* GetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); +#endif + +/* SwapException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#else +static CYTHON_INLINE void 
__Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); +#endif + +/* Import.proto */ +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); + +/* FastTypeChecks.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); +#else +#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) +#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) +#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) +#endif +#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) + +static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ +/* ListCompAppend.proto */ +#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS +static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { + PyListObject* L = (PyListObject*) list; + Py_ssize_t len = Py_SIZE(list); + if (likely(L->allocated > len)) { + Py_INCREF(x); + PyList_SET_ITEM(list, len, x); + __Pyx_SET_SIZE(list, len + 1); + return 0; + } + return PyList_Append(list, x); +} +#else +#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) +#endif + +/* PyIntBinop.proto */ +#if !CYTHON_COMPILING_IN_PYPY +static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); +#else +#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\ + (inplace ? 
PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) +#endif + +/* ListExtend.proto */ +static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { +#if CYTHON_COMPILING_IN_CPYTHON + PyObject* none = _PyList_Extend((PyListObject*)L, v); + if (unlikely(!none)) + return -1; + Py_DECREF(none); + return 0; +#else + return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); +#endif +} + +/* ListAppend.proto */ +#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS +static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { + PyListObject* L = (PyListObject*) list; + Py_ssize_t len = Py_SIZE(list); + if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { + Py_INCREF(x); + PyList_SET_ITEM(list, len, x); + __Pyx_SET_SIZE(list, len + 1); + return 0; + } + return PyList_Append(list, x); +} +#else +#define __Pyx_PyList_Append(L,x) PyList_Append(L,x) +#endif + +/* None.proto */ +static CYTHON_INLINE long __Pyx_div_long(long, long); + +/* ImportFrom.proto */ +static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); + +/* HasAttr.proto */ +static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); + +/* PyObject_GenericGetAttrNoDict.proto */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr +#endif + +/* PyObject_GenericGetAttr.proto */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr +#endif + +/* SetVTable.proto */ +static int __Pyx_SetVtable(PyObject *dict, void *vtable); + +/* PyObjectGetAttrStrNoError.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name); + +/* SetupReduce.proto */ +static int __Pyx_setup_reduce(PyObject* type_obj); + +/* CLineInTraceback.proto */ +#ifdef CYTHON_CLINE_IN_TRACEBACK +#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0) +#else +static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); +#endif + +/* CodeObjectCache.proto */ +typedef struct { + PyCodeObject* code_object; + int code_line; +} __Pyx_CodeObjectCacheEntry; +struct __Pyx_CodeObjectCache { + int count; + int max_count; + __Pyx_CodeObjectCacheEntry* entries; +}; +static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); +static PyCodeObject *__pyx_find_code_object(int code_line); +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); + +/* AddTraceback.proto */ +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename); + +#if PY_MAJOR_VERSION < 3 + static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); + static void __Pyx_ReleaseBuffer(Py_buffer *view); +#else + #define __Pyx_GetBuffer PyObject_GetBuffer + #define __Pyx_ReleaseBuffer PyBuffer_Release +#endif + + +/* BufferStructDeclare.proto */ +typedef struct { + Py_ssize_t shape, strides, suboffsets; +} __Pyx_Buf_DimInfo; +typedef struct { + size_t refcount; + Py_buffer pybuffer; +} __Pyx_Buffer; +typedef struct { + __Pyx_Buffer *rcbuffer; + char *data; + __Pyx_Buf_DimInfo diminfo[8]; +} __Pyx_LocalBuf_ND; + +/* MemviewSliceIsContig.proto */ +static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); + +/* OverlappingSlices.proto */ +static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, + __Pyx_memviewslice *slice2, + int ndim, size_t itemsize); + +/* Capsule.proto */ +static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); + +/* IsLittleEndian.proto */ +static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); + +/* BufferFormatCheck.proto */ +static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); +static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, + __Pyx_BufFmt_StackElem* stack, + __Pyx_TypeInfo* type); + +/* TypeInfoCompare.proto */ +static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); + +/* MemviewSliceValidateAndInit.proto */ +static int __Pyx_ValidateAndInit_memviewslice( + int *axes_specs, + int c_or_f_flag, + int buf_flags, + int ndim, + __Pyx_TypeInfo *dtype, + __Pyx_BufFmt_StackElem stack[], + __Pyx_memviewslice *memviewslice, + PyObject *original_obj); + +/* ObjectToMemviewSlice.proto */ +static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(PyObject *, int writable_flag); + +/* ObjectToMemviewSlice.proto */ +static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *, int writable_flag); + +/* ObjectToMemviewSlice.proto */ +static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *, int writable_flag); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); + +/* MemviewSliceCopyTemplate.proto */ +static __Pyx_memviewslice +__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, + const char *mode, int ndim, + size_t sizeof_dtype, int contig_flag, + int dtype_is_object); + +/* CIntFromPy.proto */ +static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); + +/* CIntFromPy.proto */ +static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); + +/* CIntFromPy.proto */ +static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject 
*); + +/* CheckBinaryVersion.proto */ +static int __Pyx_check_binary_version(void); + +/* InitStrings.proto */ +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); + +static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ +static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ +static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ +static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ +static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ +static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ +static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ +static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ +static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ +static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ + +/* Module declarations from 'cython.view' */ + +/* Module declarations from 'cython' */ + +/* Module declarations from 'monotonic_align.core' */ +static PyTypeObject *__pyx_array_type = 0; +static PyTypeObject *__pyx_MemviewEnum_type = 0; +static PyTypeObject *__pyx_memoryview_type = 0; +static PyTypeObject *__pyx_memoryviewslice_type = 0; +static PyObject *generic = 0; +static PyObject *strided = 0; +static PyObject *indirect = 0; +static PyObject *contiguous = 0; +static PyObject *indirect_contiguous = 0; +static int __pyx_memoryview_thread_locks_used; +static PyThread_type_lock __pyx_memoryview_thread_locks[8]; +static void __pyx_f_15monotonic_align_4core_maximum_path_each(__Pyx_memviewslice, __Pyx_memviewslice, int, int, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each *__pyx_optional_args); /*proto*/ +static void __pyx_f_15monotonic_align_4core_maximum_path_c(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/ +static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ +static void *__pyx_align_pointer(void *, size_t); /*proto*/ +static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ +static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ +static PyObject *_unellipsify(PyObject *, int); /*proto*/ +static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ +static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ +static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ +static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ +static int __pyx_memslice_transpose(__Pyx_memviewslice *); 
/*proto*/ +static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ +static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ +static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ +static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ +static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ +static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ +static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ +static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ +static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ +static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ +static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ +static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ +static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ +static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ +static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ +static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ +static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ +static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ +static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ +static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ +static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ +static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ +static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/ +static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, IS_UNSIGNED(int) ? 
'U' : 'I', IS_UNSIGNED(int), 0 }; +static __Pyx_TypeInfo __Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 }; +#define __Pyx_MODULE_NAME "monotonic_align.core" +extern int __pyx_module_is_main_monotonic_align__core; +int __pyx_module_is_main_monotonic_align__core = 0; + +/* Implementation of 'monotonic_align.core' */ +static PyObject *__pyx_builtin_range; +static PyObject *__pyx_builtin_ValueError; +static PyObject *__pyx_builtin_MemoryError; +static PyObject *__pyx_builtin_enumerate; +static PyObject *__pyx_builtin_TypeError; +static PyObject *__pyx_builtin_Ellipsis; +static PyObject *__pyx_builtin_id; +static PyObject *__pyx_builtin_IndexError; +static const char __pyx_k_O[] = "O"; +static const char __pyx_k_c[] = "c"; +static const char __pyx_k_id[] = "id"; +static const char __pyx_k_new[] = "__new__"; +static const char __pyx_k_obj[] = "obj"; +static const char __pyx_k_base[] = "base"; +static const char __pyx_k_dict[] = "__dict__"; +static const char __pyx_k_main[] = "__main__"; +static const char __pyx_k_mode[] = "mode"; +static const char __pyx_k_name[] = "name"; +static const char __pyx_k_ndim[] = "ndim"; +static const char __pyx_k_pack[] = "pack"; +static const char __pyx_k_size[] = "size"; +static const char __pyx_k_step[] = "step"; +static const char __pyx_k_stop[] = "stop"; +static const char __pyx_k_t_xs[] = "t_xs"; +static const char __pyx_k_t_ys[] = "t_ys"; +static const char __pyx_k_test[] = "__test__"; +static const char __pyx_k_ASCII[] = "ASCII"; +static const char __pyx_k_class[] = "__class__"; +static const char __pyx_k_error[] = "error"; +static const char __pyx_k_flags[] = "flags"; +static const char __pyx_k_paths[] = "paths"; +static const char __pyx_k_range[] = "range"; +static const char __pyx_k_shape[] = "shape"; +static const char __pyx_k_start[] = "start"; +static const char __pyx_k_encode[] = "encode"; +static const char __pyx_k_format[] = "format"; +static const char __pyx_k_import[] = "__import__"; +static const char __pyx_k_name_2[] = "__name__"; +static const char __pyx_k_pickle[] = "pickle"; +static const char __pyx_k_reduce[] = "__reduce__"; +static const char __pyx_k_struct[] = "struct"; +static const char __pyx_k_unpack[] = "unpack"; +static const char __pyx_k_update[] = "update"; +static const char __pyx_k_values[] = "values"; +static const char __pyx_k_fortran[] = "fortran"; +static const char __pyx_k_memview[] = "memview"; +static const char __pyx_k_Ellipsis[] = "Ellipsis"; +static const char __pyx_k_getstate[] = "__getstate__"; +static const char __pyx_k_itemsize[] = "itemsize"; +static const char __pyx_k_pyx_type[] = "__pyx_type"; +static const char __pyx_k_setstate[] = "__setstate__"; +static const char __pyx_k_TypeError[] = "TypeError"; +static const char __pyx_k_enumerate[] = "enumerate"; +static const char __pyx_k_pyx_state[] = "__pyx_state"; +static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; +static const char __pyx_k_IndexError[] = "IndexError"; +static const char __pyx_k_ValueError[] = "ValueError"; +static const char __pyx_k_pyx_result[] = "__pyx_result"; +static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; +static const char __pyx_k_MemoryError[] = "MemoryError"; +static const char __pyx_k_PickleError[] = "PickleError"; +static const char __pyx_k_pyx_checksum[] = "__pyx_checksum"; +static const char __pyx_k_stringsource[] = "stringsource"; +static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; +static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; +static const char 
__pyx_k_View_MemoryView[] = "View.MemoryView"; +static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; +static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; +static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; +static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; +static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum"; +static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; +static const char __pyx_k_strided_and_direct[] = ""; +static const char __pyx_k_strided_and_indirect[] = ""; +static const char __pyx_k_contiguous_and_direct[] = ""; +static const char __pyx_k_MemoryView_of_r_object[] = ""; +static const char __pyx_k_MemoryView_of_r_at_0x_x[] = ""; +static const char __pyx_k_contiguous_and_indirect[] = ""; +static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; +static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; +static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; +static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; +static const char __pyx_k_strided_and_direct_or_indirect[] = ""; +static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; +static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; +static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview"; +static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview"; +static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; +static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))"; +static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; +static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; +static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; +static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; +static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; +static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; +static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; +static PyObject *__pyx_n_s_ASCII; +static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; +static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; +static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor; +static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi; +static PyObject *__pyx_kp_s_Cannot_index_with_type_s; +static PyObject *__pyx_n_s_Ellipsis; +static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; +static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0; +static PyObject *__pyx_n_s_IndexError; +static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; +static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; +static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; +static PyObject *__pyx_n_s_MemoryError; +static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; +static PyObject 
*__pyx_kp_s_MemoryView_of_r_object; +static PyObject *__pyx_n_b_O; +static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; +static PyObject *__pyx_n_s_PickleError; +static PyObject *__pyx_n_s_TypeError; +static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; +static PyObject *__pyx_n_s_ValueError; +static PyObject *__pyx_n_s_View_MemoryView; +static PyObject *__pyx_n_s_allocate_buffer; +static PyObject *__pyx_n_s_base; +static PyObject *__pyx_n_s_c; +static PyObject *__pyx_n_u_c; +static PyObject *__pyx_n_s_class; +static PyObject *__pyx_n_s_cline_in_traceback; +static PyObject *__pyx_kp_s_contiguous_and_direct; +static PyObject *__pyx_kp_s_contiguous_and_indirect; +static PyObject *__pyx_n_s_dict; +static PyObject *__pyx_n_s_dtype_is_object; +static PyObject *__pyx_n_s_encode; +static PyObject *__pyx_n_s_enumerate; +static PyObject *__pyx_n_s_error; +static PyObject *__pyx_n_s_flags; +static PyObject *__pyx_n_s_format; +static PyObject *__pyx_n_s_fortran; +static PyObject *__pyx_n_u_fortran; +static PyObject *__pyx_n_s_getstate; +static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; +static PyObject *__pyx_n_s_id; +static PyObject *__pyx_n_s_import; +static PyObject *__pyx_n_s_itemsize; +static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; +static PyObject *__pyx_n_s_main; +static PyObject *__pyx_n_s_memview; +static PyObject *__pyx_n_s_mode; +static PyObject *__pyx_n_s_name; +static PyObject *__pyx_n_s_name_2; +static PyObject *__pyx_n_s_ndim; +static PyObject *__pyx_n_s_new; +static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; +static PyObject *__pyx_n_s_obj; +static PyObject *__pyx_n_s_pack; +static PyObject *__pyx_n_s_paths; +static PyObject *__pyx_n_s_pickle; +static PyObject *__pyx_n_s_pyx_PickleError; +static PyObject *__pyx_n_s_pyx_checksum; +static PyObject *__pyx_n_s_pyx_getbuffer; +static PyObject *__pyx_n_s_pyx_result; +static PyObject *__pyx_n_s_pyx_state; +static PyObject *__pyx_n_s_pyx_type; +static PyObject *__pyx_n_s_pyx_unpickle_Enum; +static PyObject *__pyx_n_s_pyx_vtable; +static PyObject *__pyx_n_s_range; +static PyObject *__pyx_n_s_reduce; +static PyObject *__pyx_n_s_reduce_cython; +static PyObject *__pyx_n_s_reduce_ex; +static PyObject *__pyx_n_s_setstate; +static PyObject *__pyx_n_s_setstate_cython; +static PyObject *__pyx_n_s_shape; +static PyObject *__pyx_n_s_size; +static PyObject *__pyx_n_s_start; +static PyObject *__pyx_n_s_step; +static PyObject *__pyx_n_s_stop; +static PyObject *__pyx_kp_s_strided_and_direct; +static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; +static PyObject *__pyx_kp_s_strided_and_indirect; +static PyObject *__pyx_kp_s_stringsource; +static PyObject *__pyx_n_s_struct; +static PyObject *__pyx_n_s_t_xs; +static PyObject *__pyx_n_s_t_ys; +static PyObject *__pyx_n_s_test; +static PyObject *__pyx_kp_s_unable_to_allocate_array_data; +static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; +static PyObject *__pyx_n_s_unpack; +static PyObject *__pyx_n_s_update; +static PyObject *__pyx_n_s_values; +static PyObject *__pyx_pf_15monotonic_align_4core_maximum_path_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs); /* proto */ +static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ 
+static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ +static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ +static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ +static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ +static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ +static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ +static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ +static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ +static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ +static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ +static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ +static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject 
*__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ +static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ +static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ +static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ +static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ +static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ +static PyObject *__pyx_int_0; +static PyObject *__pyx_int_1; +static PyObject *__pyx_int_184977713; +static PyObject *__pyx_int_neg_1; +static float __pyx_k_; +static PyObject *__pyx_tuple__2; +static PyObject *__pyx_tuple__3; +static PyObject *__pyx_tuple__4; +static PyObject *__pyx_tuple__5; +static PyObject *__pyx_tuple__6; +static PyObject *__pyx_tuple__7; +static PyObject *__pyx_tuple__8; +static PyObject *__pyx_tuple__9; +static PyObject *__pyx_slice__16; +static 
PyObject *__pyx_tuple__10; +static PyObject *__pyx_tuple__11; +static PyObject *__pyx_tuple__12; +static PyObject *__pyx_tuple__13; +static PyObject *__pyx_tuple__14; +static PyObject *__pyx_tuple__15; +static PyObject *__pyx_tuple__17; +static PyObject *__pyx_tuple__18; +static PyObject *__pyx_tuple__19; +static PyObject *__pyx_tuple__20; +static PyObject *__pyx_tuple__21; +static PyObject *__pyx_tuple__22; +static PyObject *__pyx_tuple__23; +static PyObject *__pyx_tuple__24; +static PyObject *__pyx_tuple__25; +static PyObject *__pyx_codeobj__26; +/* Late includes */ + +/* "monotonic_align/core.pyx":7 + * @cython.boundscheck(False) + * @cython.wraparound(False) + * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< + * cdef int x + * cdef int y + */ + +static void __pyx_f_15monotonic_align_4core_maximum_path_each(__Pyx_memviewslice __pyx_v_path, __Pyx_memviewslice __pyx_v_value, int __pyx_v_t_y, int __pyx_v_t_x, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each *__pyx_optional_args) { + float __pyx_v_max_neg_val = __pyx_k_; + int __pyx_v_x; + int __pyx_v_y; + float __pyx_v_v_prev; + float __pyx_v_v_cur; + int __pyx_v_index; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + long __pyx_t_4; + int __pyx_t_5; + long __pyx_t_6; + long __pyx_t_7; + int __pyx_t_8; + Py_ssize_t __pyx_t_9; + Py_ssize_t __pyx_t_10; + float __pyx_t_11; + float __pyx_t_12; + float __pyx_t_13; + int __pyx_t_14; + Py_ssize_t __pyx_t_15; + Py_ssize_t __pyx_t_16; + if (__pyx_optional_args) { + if (__pyx_optional_args->__pyx_n > 0) { + __pyx_v_max_neg_val = __pyx_optional_args->max_neg_val; + } + } + + /* "monotonic_align/core.pyx":13 + * cdef float v_cur + * cdef float tmp + * cdef int index = t_x - 1 # <<<<<<<<<<<<<< + * + * for y in range(t_y): + */ + __pyx_v_index = (__pyx_v_t_x - 1); + + /* "monotonic_align/core.pyx":15 + * cdef int index = t_x - 1 + * + * for y in range(t_y): # <<<<<<<<<<<<<< + * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): + * if x == y: + */ + __pyx_t_1 = __pyx_v_t_y; + __pyx_t_2 = __pyx_t_1; + for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { + __pyx_v_y = __pyx_t_3; + + /* "monotonic_align/core.pyx":16 + * + * for y in range(t_y): + * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): # <<<<<<<<<<<<<< + * if x == y: + * v_cur = max_neg_val + */ + __pyx_t_4 = (__pyx_v_y + 1); + __pyx_t_5 = __pyx_v_t_x; + if (((__pyx_t_4 < __pyx_t_5) != 0)) { + __pyx_t_6 = __pyx_t_4; + } else { + __pyx_t_6 = __pyx_t_5; + } + __pyx_t_4 = __pyx_t_6; + __pyx_t_5 = ((__pyx_v_t_x + __pyx_v_y) - __pyx_v_t_y); + __pyx_t_6 = 0; + if (((__pyx_t_5 > __pyx_t_6) != 0)) { + __pyx_t_7 = __pyx_t_5; + } else { + __pyx_t_7 = __pyx_t_6; + } + __pyx_t_6 = __pyx_t_4; + for (__pyx_t_5 = __pyx_t_7; __pyx_t_5 < __pyx_t_6; __pyx_t_5+=1) { + __pyx_v_x = __pyx_t_5; + + /* "monotonic_align/core.pyx":17 + * for y in range(t_y): + * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): + * if x == y: # <<<<<<<<<<<<<< + * v_cur = max_neg_val + * else: + */ + __pyx_t_8 = ((__pyx_v_x == __pyx_v_y) != 0); + if (__pyx_t_8) { + + /* "monotonic_align/core.pyx":18 + * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): + * if x == y: + * v_cur = max_neg_val # <<<<<<<<<<<<<< + * else: + * v_cur = value[y-1, x] + */ + __pyx_v_v_cur = __pyx_v_max_neg_val; + + /* "monotonic_align/core.pyx":17 + * for y in range(t_y): + * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): + * if x == y: # <<<<<<<<<<<<<< + * v_cur = 
max_neg_val + * else: + */ + goto __pyx_L7; + } + + /* "monotonic_align/core.pyx":20 + * v_cur = max_neg_val + * else: + * v_cur = value[y-1, x] # <<<<<<<<<<<<<< + * if x == 0: + * if y == 0: + */ + /*else*/ { + __pyx_t_9 = (__pyx_v_y - 1); + __pyx_t_10 = __pyx_v_x; + __pyx_v_v_cur = (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) ))); + } + __pyx_L7:; + + /* "monotonic_align/core.pyx":21 + * else: + * v_cur = value[y-1, x] + * if x == 0: # <<<<<<<<<<<<<< + * if y == 0: + * v_prev = 0. + */ + __pyx_t_8 = ((__pyx_v_x == 0) != 0); + if (__pyx_t_8) { + + /* "monotonic_align/core.pyx":22 + * v_cur = value[y-1, x] + * if x == 0: + * if y == 0: # <<<<<<<<<<<<<< + * v_prev = 0. + * else: + */ + __pyx_t_8 = ((__pyx_v_y == 0) != 0); + if (__pyx_t_8) { + + /* "monotonic_align/core.pyx":23 + * if x == 0: + * if y == 0: + * v_prev = 0. # <<<<<<<<<<<<<< + * else: + * v_prev = max_neg_val + */ + __pyx_v_v_prev = 0.; + + /* "monotonic_align/core.pyx":22 + * v_cur = value[y-1, x] + * if x == 0: + * if y == 0: # <<<<<<<<<<<<<< + * v_prev = 0. + * else: + */ + goto __pyx_L9; + } + + /* "monotonic_align/core.pyx":25 + * v_prev = 0. + * else: + * v_prev = max_neg_val # <<<<<<<<<<<<<< + * else: + * v_prev = value[y-1, x-1] + */ + /*else*/ { + __pyx_v_v_prev = __pyx_v_max_neg_val; + } + __pyx_L9:; + + /* "monotonic_align/core.pyx":21 + * else: + * v_cur = value[y-1, x] + * if x == 0: # <<<<<<<<<<<<<< + * if y == 0: + * v_prev = 0. + */ + goto __pyx_L8; + } + + /* "monotonic_align/core.pyx":27 + * v_prev = max_neg_val + * else: + * v_prev = value[y-1, x-1] # <<<<<<<<<<<<<< + * value[y, x] += max(v_prev, v_cur) + * + */ + /*else*/ { + __pyx_t_10 = (__pyx_v_y - 1); + __pyx_t_9 = (__pyx_v_x - 1); + __pyx_v_v_prev = (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_10 * __pyx_v_value.strides[0]) )) + __pyx_t_9)) ))); + } + __pyx_L8:; + + /* "monotonic_align/core.pyx":28 + * else: + * v_prev = value[y-1, x-1] + * value[y, x] += max(v_prev, v_cur) # <<<<<<<<<<<<<< + * + * for y in range(t_y - 1, -1, -1): + */ + __pyx_t_11 = __pyx_v_v_cur; + __pyx_t_12 = __pyx_v_v_prev; + if (((__pyx_t_11 > __pyx_t_12) != 0)) { + __pyx_t_13 = __pyx_t_11; + } else { + __pyx_t_13 = __pyx_t_12; + } + __pyx_t_9 = __pyx_v_y; + __pyx_t_10 = __pyx_v_x; + *((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) )) += __pyx_t_13; + } + } + + /* "monotonic_align/core.pyx":30 + * value[y, x] += max(v_prev, v_cur) + * + * for y in range(t_y - 1, -1, -1): # <<<<<<<<<<<<<< + * path[y, index] = 1 + * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): + */ + for (__pyx_t_1 = (__pyx_v_t_y - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { + __pyx_v_y = __pyx_t_1; + + /* "monotonic_align/core.pyx":31 + * + * for y in range(t_y - 1, -1, -1): + * path[y, index] = 1 # <<<<<<<<<<<<<< + * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): + * index = index - 1 + */ + __pyx_t_10 = __pyx_v_y; + __pyx_t_9 = __pyx_v_index; + *((int *) ( /* dim=1 */ ((char *) (((int *) ( /* dim=0 */ (__pyx_v_path.data + __pyx_t_10 * __pyx_v_path.strides[0]) )) + __pyx_t_9)) )) = 1; + + /* "monotonic_align/core.pyx":32 + * for y in range(t_y - 1, -1, -1): + * path[y, index] = 1 + * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): # <<<<<<<<<<<<<< + * index = index - 1 + * + */ + __pyx_t_14 = 
((__pyx_v_index != 0) != 0); + if (__pyx_t_14) { + } else { + __pyx_t_8 = __pyx_t_14; + goto __pyx_L13_bool_binop_done; + } + __pyx_t_14 = ((__pyx_v_index == __pyx_v_y) != 0); + if (!__pyx_t_14) { + } else { + __pyx_t_8 = __pyx_t_14; + goto __pyx_L13_bool_binop_done; + } + __pyx_t_9 = (__pyx_v_y - 1); + __pyx_t_10 = __pyx_v_index; + __pyx_t_15 = (__pyx_v_y - 1); + __pyx_t_16 = (__pyx_v_index - 1); + __pyx_t_14 = (((*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) ))) < (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_15 * __pyx_v_value.strides[0]) )) + __pyx_t_16)) )))) != 0); + __pyx_t_8 = __pyx_t_14; + __pyx_L13_bool_binop_done:; + if (__pyx_t_8) { + + /* "monotonic_align/core.pyx":33 + * path[y, index] = 1 + * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): + * index = index - 1 # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_index = (__pyx_v_index - 1); + + /* "monotonic_align/core.pyx":32 + * for y in range(t_y - 1, -1, -1): + * path[y, index] = 1 + * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): # <<<<<<<<<<<<<< + * index = index - 1 + * + */ + } + } + + /* "monotonic_align/core.pyx":7 + * @cython.boundscheck(False) + * @cython.wraparound(False) + * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< + * cdef int x + * cdef int y + */ + + /* function exit code */ +} + +/* "monotonic_align/core.pyx":38 + * @cython.boundscheck(False) + * @cython.wraparound(False) + * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: # <<<<<<<<<<<<<< + * cdef int b = paths.shape[0] + * cdef int i + */ + +static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static void __pyx_f_15monotonic_align_4core_maximum_path_c(__Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs, CYTHON_UNUSED int __pyx_skip_dispatch) { + CYTHON_UNUSED int __pyx_v_b; + int __pyx_v_i; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + __Pyx_memviewslice __pyx_t_4 = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } }; + Py_ssize_t __pyx_t_6; + Py_ssize_t __pyx_t_7; + + /* "monotonic_align/core.pyx":39 + * @cython.wraparound(False) + * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: + * cdef int b = paths.shape[0] # <<<<<<<<<<<<<< + * cdef int i + * for i in prange(b, nogil=True): + */ + __pyx_v_b = (__pyx_v_paths.shape[0]); + + /* "monotonic_align/core.pyx":41 + * cdef int b = paths.shape[0] + * cdef int i + * for i in prange(b, nogil=True): # <<<<<<<<<<<<<< + * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) + */ + { + #ifdef WITH_THREAD + PyThreadState *_save; + Py_UNBLOCK_THREADS + __Pyx_FastGIL_Remember(); + #endif + /*try:*/ { + __pyx_t_1 = __pyx_v_b; + if ((1 == 0)) abort(); + { + #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) + #undef likely + #undef unlikely + #define likely(x) (x) + #define unlikely(x) (x) + #endif + __pyx_t_3 = (__pyx_t_1 - 0 + 1 - 1/abs(1)) / 1; + if (__pyx_t_3 > 0) + { + #ifdef _OPENMP + #pragma omp parallel private(__pyx_t_6, 
__pyx_t_7) firstprivate(__pyx_t_4, __pyx_t_5) + #endif /* _OPENMP */ + { + #ifdef _OPENMP + #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) + #endif /* _OPENMP */ + for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){ + { + __pyx_v_i = (int)(0 + 1 * __pyx_t_2); + + /* "monotonic_align/core.pyx":42 + * cdef int i + * for i in prange(b, nogil=True): + * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) # <<<<<<<<<<<<<< + */ + __pyx_t_4.data = __pyx_v_paths.data; + __pyx_t_4.memview = __pyx_v_paths.memview; + __PYX_INC_MEMVIEW(&__pyx_t_4, 0); + { + Py_ssize_t __pyx_tmp_idx = __pyx_v_i; + Py_ssize_t __pyx_tmp_stride = __pyx_v_paths.strides[0]; + __pyx_t_4.data += __pyx_tmp_idx * __pyx_tmp_stride; +} + +__pyx_t_4.shape[0] = __pyx_v_paths.shape[1]; +__pyx_t_4.strides[0] = __pyx_v_paths.strides[1]; + __pyx_t_4.suboffsets[0] = -1; + +__pyx_t_4.shape[1] = __pyx_v_paths.shape[2]; +__pyx_t_4.strides[1] = __pyx_v_paths.strides[2]; + __pyx_t_4.suboffsets[1] = -1; + +__pyx_t_5.data = __pyx_v_values.data; + __pyx_t_5.memview = __pyx_v_values.memview; + __PYX_INC_MEMVIEW(&__pyx_t_5, 0); + { + Py_ssize_t __pyx_tmp_idx = __pyx_v_i; + Py_ssize_t __pyx_tmp_stride = __pyx_v_values.strides[0]; + __pyx_t_5.data += __pyx_tmp_idx * __pyx_tmp_stride; +} + +__pyx_t_5.shape[0] = __pyx_v_values.shape[1]; +__pyx_t_5.strides[0] = __pyx_v_values.strides[1]; + __pyx_t_5.suboffsets[0] = -1; + +__pyx_t_5.shape[1] = __pyx_v_values.shape[2]; +__pyx_t_5.strides[1] = __pyx_v_values.strides[2]; + __pyx_t_5.suboffsets[1] = -1; + +__pyx_t_6 = __pyx_v_i; + __pyx_t_7 = __pyx_v_i; + __pyx_f_15monotonic_align_4core_maximum_path_each(__pyx_t_4, __pyx_t_5, (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_t_ys.data) + __pyx_t_6)) ))), (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_t_xs.data) + __pyx_t_7)) ))), NULL); + __PYX_XDEC_MEMVIEW(&__pyx_t_4, 0); + __pyx_t_4.memview = NULL; + __pyx_t_4.data = NULL; + __PYX_XDEC_MEMVIEW(&__pyx_t_5, 0); + __pyx_t_5.memview = NULL; + __pyx_t_5.data = NULL; + } + } + } + } + } + #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) + #undef likely + #undef unlikely + #define likely(x) __builtin_expect(!!(x), 1) + #define unlikely(x) __builtin_expect(!!(x), 0) + #endif + } + + /* "monotonic_align/core.pyx":41 + * cdef int b = paths.shape[0] + * cdef int i + * for i in prange(b, nogil=True): # <<<<<<<<<<<<<< + * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) + */ + /*finally:*/ { + /*normal exit:*/{ + #ifdef WITH_THREAD + __Pyx_FastGIL_Forget(); + Py_BLOCK_THREADS + #endif + goto __pyx_L5; + } + __pyx_L5:; + } + } + + /* "monotonic_align/core.pyx":38 + * @cython.boundscheck(False) + * @cython.wraparound(False) + * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: # <<<<<<<<<<<<<< + * cdef int b = paths.shape[0] + * cdef int i + */ + + /* function exit code */ +} + +/* Python wrapper */ +static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + __Pyx_memviewslice __pyx_v_paths = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_memviewslice __pyx_v_values = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_memviewslice __pyx_v_t_ys = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_memviewslice __pyx_v_t_xs = { 0, 0, { 0 }, { 0 }, { 0 } }; + 
int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("maximum_path_c (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_paths,&__pyx_n_s_values,&__pyx_n_s_t_ys,&__pyx_n_s_t_xs,0}; + PyObject* values[4] = {0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_paths)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_values)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 1); __PYX_ERR(0, 38, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_t_ys)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 2); __PYX_ERR(0, 38, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_t_xs)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 3); __PYX_ERR(0, 38, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "maximum_path_c") < 0)) __PYX_ERR(0, 38, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + } + __pyx_v_paths = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_paths.memview)) __PYX_ERR(0, 38, __pyx_L3_error) + __pyx_v_values = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_values.memview)) __PYX_ERR(0, 38, __pyx_L3_error) + __pyx_v_t_ys = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_t_ys.memview)) __PYX_ERR(0, 38, __pyx_L3_error) + __pyx_v_t_xs = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_t_xs.memview)) __PYX_ERR(0, 38, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 38, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("monotonic_align.core.maximum_path_c", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_15monotonic_align_4core_maximum_path_c(__pyx_self, __pyx_v_paths, __pyx_v_values, __pyx_v_t_ys, __pyx_v_t_xs); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject 
*__pyx_pf_15monotonic_align_4core_maximum_path_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("maximum_path_c", 0); + __Pyx_XDECREF(__pyx_r); + if (unlikely(!__pyx_v_paths.memview)) { __Pyx_RaiseUnboundLocalError("paths"); __PYX_ERR(0, 38, __pyx_L1_error) } + if (unlikely(!__pyx_v_values.memview)) { __Pyx_RaiseUnboundLocalError("values"); __PYX_ERR(0, 38, __pyx_L1_error) } + if (unlikely(!__pyx_v_t_ys.memview)) { __Pyx_RaiseUnboundLocalError("t_ys"); __PYX_ERR(0, 38, __pyx_L1_error) } + if (unlikely(!__pyx_v_t_xs.memview)) { __Pyx_RaiseUnboundLocalError("t_xs"); __PYX_ERR(0, 38, __pyx_L1_error) } + __pyx_t_1 = __Pyx_void_to_None(__pyx_f_15monotonic_align_4core_maximum_path_c(__pyx_v_paths, __pyx_v_values, __pyx_v_t_ys, __pyx_v_t_xs, 0)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 38, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("monotonic_align.core.maximum_path_c", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __PYX_XDEC_MEMVIEW(&__pyx_v_paths, 1); + __PYX_XDEC_MEMVIEW(&__pyx_v_values, 1); + __PYX_XDEC_MEMVIEW(&__pyx_v_t_ys, 1); + __PYX_XDEC_MEMVIEW(&__pyx_v_t_xs, 1); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":122 + * cdef bint dtype_is_object + * + * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< + * mode="c", bint allocate_buffer=True): + * + */ + +/* Python wrapper */ +static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_shape = 0; + Py_ssize_t __pyx_v_itemsize; + PyObject *__pyx_v_format = 0; + PyObject *__pyx_v_mode = 0; + int __pyx_v_allocate_buffer; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; + PyObject* values[5] = {0,0,0,0,0}; + values[3] = ((PyObject *)__pyx_n_s_c); + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = 
__Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 122, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 122, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode); + if (value) { values[3] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 4: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer); + if (value) { values[4] = value; kw_args--; } + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 122, __pyx_L3_error) + } + } else { + switch (PyTuple_GET_SIZE(__pyx_args)) { + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + break; + default: goto __pyx_L5_argtuple_error; + } + } + __pyx_v_shape = ((PyObject*)values[0]); + __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error) + __pyx_v_format = values[2]; + __pyx_v_mode = values[3]; + if (values[4]) { + __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 123, __pyx_L3_error) + } else { + + /* "View.MemoryView":123 + * + * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, + * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< + * + * cdef int idx + */ + __pyx_v_allocate_buffer = ((int)1); + } + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 122, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return -1; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 122, __pyx_L1_error) + if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { + PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 122, __pyx_L1_error) + } + __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); + + /* "View.MemoryView":122 + * cdef bint dtype_is_object + * + * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< + * mode="c", bint allocate_buffer=True): + * + */ + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject 
*__pyx_v_mode, int __pyx_v_allocate_buffer) { + int __pyx_v_idx; + Py_ssize_t __pyx_v_i; + Py_ssize_t __pyx_v_dim; + PyObject **__pyx_v_p; + char __pyx_v_order; + int __pyx_r; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + char *__pyx_t_7; + int __pyx_t_8; + Py_ssize_t __pyx_t_9; + PyObject *__pyx_t_10 = NULL; + Py_ssize_t __pyx_t_11; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__cinit__", 0); + __Pyx_INCREF(__pyx_v_format); + + /* "View.MemoryView":129 + * cdef PyObject **p + * + * self.ndim = <int> len(shape) # <<<<<<<<<<<<<< + * self.itemsize = itemsize + * + */ + if (unlikely(__pyx_v_shape == Py_None)) { + PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); + __PYX_ERR(1, 129, __pyx_L1_error) + } + __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 129, __pyx_L1_error) + __pyx_v_self->ndim = ((int)__pyx_t_1); + + /* "View.MemoryView":130 + * + * self.ndim = <int> len(shape) + * self.itemsize = itemsize # <<<<<<<<<<<<<< + * + * if not self.ndim: + */ + __pyx_v_self->itemsize = __pyx_v_itemsize; + + /* "View.MemoryView":132 + * self.itemsize = itemsize + * + * if not self.ndim: # <<<<<<<<<<<<<< + * raise ValueError("Empty shape tuple for cython.array") + * + */ + __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); + if (unlikely(__pyx_t_2)) { + + /* "View.MemoryView":133 + * + * if not self.ndim: + * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< + * + * if itemsize <= 0: + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 133, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 133, __pyx_L1_error) + + /* "View.MemoryView":132 + * self.itemsize = itemsize + * + * if not self.ndim: # <<<<<<<<<<<<<< + * raise ValueError("Empty shape tuple for cython.array") + * + */ + } + + /* "View.MemoryView":135 + * raise ValueError("Empty shape tuple for cython.array") + * + * if itemsize <= 0: # <<<<<<<<<<<<<< + * raise ValueError("itemsize <= 0 for cython.array") + * + */ + __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); + if (unlikely(__pyx_t_2)) { + + /* "View.MemoryView":136 + * + * if itemsize <= 0: + * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< + * + * if not isinstance(format, bytes): + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 136, __pyx_L1_error) + + /* "View.MemoryView":135 + * raise ValueError("Empty shape tuple for cython.array") + * + * if itemsize <= 0: # <<<<<<<<<<<<<< + * raise ValueError("itemsize <= 0 for cython.array") + * + */ + } + + /* "View.MemoryView":138 + * raise ValueError("itemsize <= 0 for cython.array") + * + * if not isinstance(format, bytes): # <<<<<<<<<<<<<< + * format = format.encode('ASCII') + * self._format = format # keep a reference to the byte string + */ + __pyx_t_2 = PyBytes_Check(__pyx_v_format); + __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); + if (__pyx_t_4) { + + /* "View.MemoryView":139 + * + * if not isinstance(format, bytes): + * format = format.encode('ASCII') # <<<<<<<<<<<<<< + 
* self._format = format # keep a reference to the byte string + * self.format = self._format + */ + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 139, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { + __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); + if (likely(__pyx_t_6)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); + __Pyx_INCREF(__pyx_t_6); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_5, function); + } + } + __pyx_t_3 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII); + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 139, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3); + __pyx_t_3 = 0; + + /* "View.MemoryView":138 + * raise ValueError("itemsize <= 0 for cython.array") + * + * if not isinstance(format, bytes): # <<<<<<<<<<<<<< + * format = format.encode('ASCII') + * self._format = format # keep a reference to the byte string + */ + } + + /* "View.MemoryView":140 + * if not isinstance(format, bytes): + * format = format.encode('ASCII') + * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< + * self.format = self._format + * + */ + if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 140, __pyx_L1_error) + __pyx_t_3 = __pyx_v_format; + __Pyx_INCREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_3); + __Pyx_GOTREF(__pyx_v_self->_format); + __Pyx_DECREF(__pyx_v_self->_format); + __pyx_v_self->_format = ((PyObject*)__pyx_t_3); + __pyx_t_3 = 0; + + /* "View.MemoryView":141 + * format = format.encode('ASCII') + * self._format = format # keep a reference to the byte string + * self.format = self._format # <<<<<<<<<<<<<< + * + * + */ + if (unlikely(__pyx_v_self->_format == Py_None)) { + PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found"); + __PYX_ERR(1, 141, __pyx_L1_error) + } + __pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 141, __pyx_L1_error) + __pyx_v_self->format = __pyx_t_7; + + /* "View.MemoryView":144 + * + * + * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< + * self._strides = self._shape + self.ndim + * + */ + __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); + + /* "View.MemoryView":145 + * + * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) + * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< + * + * if not self._shape: + */ + __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); + + /* "View.MemoryView":147 + * self._strides = self._shape + self.ndim + * + * if not self._shape: # <<<<<<<<<<<<<< + * raise MemoryError("unable to allocate shape and strides.") + * + */ + __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); + if (unlikely(__pyx_t_4)) { + + /* "View.MemoryView":148 + * + * if not self._shape: + * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 148, __pyx_L1_error) + 
__Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 148, __pyx_L1_error) + + /* "View.MemoryView":147 + * self._strides = self._shape + self.ndim + * + * if not self._shape: # <<<<<<<<<<<<<< + * raise MemoryError("unable to allocate shape and strides.") + * + */ + } + + /* "View.MemoryView":151 + * + * + * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< + * if dim <= 0: + * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) + */ + __pyx_t_8 = 0; + __pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0; + for (;;) { + if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 151, __pyx_L1_error) + #else + __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 151, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + #endif + __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_v_dim = __pyx_t_9; + __pyx_v_idx = __pyx_t_8; + __pyx_t_8 = (__pyx_t_8 + 1); + + /* "View.MemoryView":152 + * + * for idx, dim in enumerate(shape): + * if dim <= 0: # <<<<<<<<<<<<<< + * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) + * self._shape[idx] = dim + */ + __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); + if (unlikely(__pyx_t_4)) { + + /* "View.MemoryView":153 + * for idx, dim in enumerate(shape): + * if dim <= 0: + * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<< + * self._shape[idx] = dim + * + */ + __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 153, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_GIVEREF(__pyx_t_5); + PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6); + __pyx_t_5 = 0; + __pyx_t_6 = 0; + __pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_Raise(__pyx_t_10, 0, 0, 0); + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __PYX_ERR(1, 153, __pyx_L1_error) + + /* "View.MemoryView":152 + * + * for idx, dim in enumerate(shape): + * if dim <= 0: # <<<<<<<<<<<<<< + * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) + * self._shape[idx] = dim + */ + } + + /* "View.MemoryView":154 + * if dim <= 0: + * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) + * self._shape[idx] = dim # <<<<<<<<<<<<<< + * + * cdef char order + */ + (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; + + /* "View.MemoryView":151 + * + * + * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< + * if dim <= 0: + * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) + */ + } + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "View.MemoryView":157 + * + * cdef char order + * if mode == 'fortran': # <<<<<<<<<<<<<< + * order = b'F' + * self.mode = u'fortran' + */ + __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 157, __pyx_L1_error) + if (__pyx_t_4) { + + /* "View.MemoryView":158 + * cdef char order + * if mode == 'fortran': + * order = b'F' # <<<<<<<<<<<<<< + * self.mode = u'fortran' + * elif mode == 'c': + */ + __pyx_v_order = 'F'; + + /* "View.MemoryView":159 + * if mode == 'fortran': + * order = b'F' + * self.mode = u'fortran' # <<<<<<<<<<<<<< + * elif mode == 'c': + * order = b'C' + */ + __Pyx_INCREF(__pyx_n_u_fortran); + __Pyx_GIVEREF(__pyx_n_u_fortran); + __Pyx_GOTREF(__pyx_v_self->mode); + __Pyx_DECREF(__pyx_v_self->mode); + __pyx_v_self->mode = __pyx_n_u_fortran; + + /* "View.MemoryView":157 + * + * cdef char order + * if mode == 'fortran': # <<<<<<<<<<<<<< + * order = b'F' + * self.mode = u'fortran' + */ + goto __pyx_L10; + } + + /* "View.MemoryView":160 + * order = b'F' + * self.mode = u'fortran' + * elif mode == 'c': # <<<<<<<<<<<<<< + * order = b'C' + * self.mode = u'c' + */ + __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 160, __pyx_L1_error) + if (likely(__pyx_t_4)) { + + /* "View.MemoryView":161 + * self.mode = u'fortran' + * elif mode == 'c': + * order = b'C' # <<<<<<<<<<<<<< + * self.mode = u'c' + * else: + */ + __pyx_v_order = 'C'; + + /* "View.MemoryView":162 + * elif mode == 'c': + * order = b'C' + * self.mode = u'c' # <<<<<<<<<<<<<< + * else: + * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) + */ + __Pyx_INCREF(__pyx_n_u_c); + __Pyx_GIVEREF(__pyx_n_u_c); + __Pyx_GOTREF(__pyx_v_self->mode); + __Pyx_DECREF(__pyx_v_self->mode); + __pyx_v_self->mode = __pyx_n_u_c; + + /* "View.MemoryView":160 + * order = b'F' + * self.mode = u'fortran' + * elif mode == 'c': # <<<<<<<<<<<<<< + * order = b'C' + * self.mode = u'c' + */ + goto __pyx_L10; + } + + /* "View.MemoryView":164 + * self.mode = u'c' + * else: + * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< + * + * self.len = fill_contig_strides_array(self._shape, self._strides, + */ + /*else*/ { + __pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 164, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 164, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_Raise(__pyx_t_10, 0, 0, 0); + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __PYX_ERR(1, 164, __pyx_L1_error) + } + __pyx_L10:; + + /* "View.MemoryView":166 + * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) + * + * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< + * itemsize, self.ndim, order) + * + */ + __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); + + /* "View.MemoryView":169 + * itemsize, self.ndim, order) + * + * self.free_data = allocate_buffer # <<<<<<<<<<<<<< + * self.dtype_is_object = format == b'O' + * if allocate_buffer: + */ + __pyx_v_self->free_data = __pyx_v_allocate_buffer; + + /* "View.MemoryView":170 + * + * 
self.free_data = allocate_buffer + * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< + * if allocate_buffer: + * + */ + __pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 170, __pyx_L1_error) + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 170, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __pyx_v_self->dtype_is_object = __pyx_t_4; + + /* "View.MemoryView":171 + * self.free_data = allocate_buffer + * self.dtype_is_object = format == b'O' + * if allocate_buffer: # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_4 = (__pyx_v_allocate_buffer != 0); + if (__pyx_t_4) { + + /* "View.MemoryView":174 + * + * + * self.data = malloc(self.len) # <<<<<<<<<<<<<< + * if not self.data: + * raise MemoryError("unable to allocate array data.") + */ + __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); + + /* "View.MemoryView":175 + * + * self.data = malloc(self.len) + * if not self.data: # <<<<<<<<<<<<<< + * raise MemoryError("unable to allocate array data.") + * + */ + __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); + if (unlikely(__pyx_t_4)) { + + /* "View.MemoryView":176 + * self.data = malloc(self.len) + * if not self.data: + * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< + * + * if self.dtype_is_object: + */ + __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 176, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_Raise(__pyx_t_10, 0, 0, 0); + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __PYX_ERR(1, 176, __pyx_L1_error) + + /* "View.MemoryView":175 + * + * self.data = malloc(self.len) + * if not self.data: # <<<<<<<<<<<<<< + * raise MemoryError("unable to allocate array data.") + * + */ + } + + /* "View.MemoryView":178 + * raise MemoryError("unable to allocate array data.") + * + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * p = self.data + * for i in range(self.len / itemsize): + */ + __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); + if (__pyx_t_4) { + + /* "View.MemoryView":179 + * + * if self.dtype_is_object: + * p = self.data # <<<<<<<<<<<<<< + * for i in range(self.len / itemsize): + * p[i] = Py_None + */ + __pyx_v_p = ((PyObject **)__pyx_v_self->data); + + /* "View.MemoryView":180 + * if self.dtype_is_object: + * p = self.data + * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< + * p[i] = Py_None + * Py_INCREF(Py_None) + */ + if (unlikely(__pyx_v_itemsize == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); + __PYX_ERR(1, 180, __pyx_L1_error) + } + else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { + PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); + __PYX_ERR(1, 180, __pyx_L1_error) + } + __pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize); + __pyx_t_9 = __pyx_t_1; + for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) { + __pyx_v_i = __pyx_t_11; + + /* "View.MemoryView":181 + * p = self.data + * for i in range(self.len / itemsize): + * p[i] = Py_None # <<<<<<<<<<<<<< + * Py_INCREF(Py_None) + * + */ + (__pyx_v_p[__pyx_v_i]) = Py_None; + + /* "View.MemoryView":182 + * for i in range(self.len / itemsize): + * p[i] = Py_None + * Py_INCREF(Py_None) # <<<<<<<<<<<<<< + * + * @cname('getbuffer') + */ + 
Py_INCREF(Py_None); + } + + /* "View.MemoryView":178 + * raise MemoryError("unable to allocate array data.") + * + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * p = self.data + * for i in range(self.len / itemsize): + */ + } + + /* "View.MemoryView":171 + * self.free_data = allocate_buffer + * self.dtype_is_object = format == b'O' + * if allocate_buffer: # <<<<<<<<<<<<<< + * + * + */ + } + + /* "View.MemoryView":122 + * cdef bint dtype_is_object + * + * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< + * mode="c", bint allocate_buffer=True): + * + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_10); + __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_format); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":185 + * + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< + * cdef int bufmode = -1 + * if self.mode == u"c": + */ + +/* Python wrapper */ +static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ +static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); + __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_v_bufmode; + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + char *__pyx_t_4; + Py_ssize_t __pyx_t_5; + int __pyx_t_6; + Py_ssize_t *__pyx_t_7; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + if (__pyx_v_info == NULL) { + PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); + return -1; + } + __Pyx_RefNannySetupContext("__getbuffer__", 0); + __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(__pyx_v_info->obj); + + /* "View.MemoryView":186 + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): + * cdef int bufmode = -1 # <<<<<<<<<<<<<< + * if self.mode == u"c": + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + */ + __pyx_v_bufmode = -1; + + /* "View.MemoryView":187 + * def __getbuffer__(self, Py_buffer *info, int flags): + * cdef int bufmode = -1 + * if self.mode == u"c": # <<<<<<<<<<<<<< + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * elif self.mode == u"fortran": + */ + __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 187, __pyx_L1_error) + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":188 + * cdef int bufmode = -1 + * if self.mode == u"c": + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< + * elif self.mode == u"fortran": + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + */ + __pyx_v_bufmode = 
(PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); + + /* "View.MemoryView":187 + * def __getbuffer__(self, Py_buffer *info, int flags): + * cdef int bufmode = -1 + * if self.mode == u"c": # <<<<<<<<<<<<<< + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * elif self.mode == u"fortran": + */ + goto __pyx_L3; + } + + /* "View.MemoryView":189 + * if self.mode == u"c": + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * elif self.mode == u"fortran": # <<<<<<<<<<<<<< + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * if not (flags & bufmode): + */ + __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 189, __pyx_L1_error) + __pyx_t_1 = (__pyx_t_2 != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":190 + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * elif self.mode == u"fortran": + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< + * if not (flags & bufmode): + * raise ValueError("Can only create a buffer that is contiguous in memory.") + */ + __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); + + /* "View.MemoryView":189 + * if self.mode == u"c": + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * elif self.mode == u"fortran": # <<<<<<<<<<<<<< + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * if not (flags & bufmode): + */ + } + __pyx_L3:; + + /* "View.MemoryView":191 + * elif self.mode == u"fortran": + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * if not (flags & bufmode): # <<<<<<<<<<<<<< + * raise ValueError("Can only create a buffer that is contiguous in memory.") + * info.buf = self.data + */ + __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":192 + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * if not (flags & bufmode): + * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< + * info.buf = self.data + * info.len = self.len + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 192, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 192, __pyx_L1_error) + + /* "View.MemoryView":191 + * elif self.mode == u"fortran": + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * if not (flags & bufmode): # <<<<<<<<<<<<<< + * raise ValueError("Can only create a buffer that is contiguous in memory.") + * info.buf = self.data + */ + } + + /* "View.MemoryView":193 + * if not (flags & bufmode): + * raise ValueError("Can only create a buffer that is contiguous in memory.") + * info.buf = self.data # <<<<<<<<<<<<<< + * info.len = self.len + * info.ndim = self.ndim + */ + __pyx_t_4 = __pyx_v_self->data; + __pyx_v_info->buf = __pyx_t_4; + + /* "View.MemoryView":194 + * raise ValueError("Can only create a buffer that is contiguous in memory.") + * info.buf = self.data + * info.len = self.len # <<<<<<<<<<<<<< + * info.ndim = self.ndim + * info.shape = self._shape + */ + __pyx_t_5 = __pyx_v_self->len; + __pyx_v_info->len = __pyx_t_5; + + /* "View.MemoryView":195 + * info.buf = self.data + * info.len = self.len + * info.ndim = self.ndim # <<<<<<<<<<<<<< + * info.shape = self._shape + * info.strides = self._strides + */ + __pyx_t_6 = __pyx_v_self->ndim; + __pyx_v_info->ndim = __pyx_t_6; + + /* "View.MemoryView":196 + * info.len = self.len + * info.ndim = self.ndim + * 
info.shape = self._shape # <<<<<<<<<<<<<< + * info.strides = self._strides + * info.suboffsets = NULL + */ + __pyx_t_7 = __pyx_v_self->_shape; + __pyx_v_info->shape = __pyx_t_7; + + /* "View.MemoryView":197 + * info.ndim = self.ndim + * info.shape = self._shape + * info.strides = self._strides # <<<<<<<<<<<<<< + * info.suboffsets = NULL + * info.itemsize = self.itemsize + */ + __pyx_t_7 = __pyx_v_self->_strides; + __pyx_v_info->strides = __pyx_t_7; + + /* "View.MemoryView":198 + * info.shape = self._shape + * info.strides = self._strides + * info.suboffsets = NULL # <<<<<<<<<<<<<< + * info.itemsize = self.itemsize + * info.readonly = 0 + */ + __pyx_v_info->suboffsets = NULL; + + /* "View.MemoryView":199 + * info.strides = self._strides + * info.suboffsets = NULL + * info.itemsize = self.itemsize # <<<<<<<<<<<<<< + * info.readonly = 0 + * + */ + __pyx_t_5 = __pyx_v_self->itemsize; + __pyx_v_info->itemsize = __pyx_t_5; + + /* "View.MemoryView":200 + * info.suboffsets = NULL + * info.itemsize = self.itemsize + * info.readonly = 0 # <<<<<<<<<<<<<< + * + * if flags & PyBUF_FORMAT: + */ + __pyx_v_info->readonly = 0; + + /* "View.MemoryView":202 + * info.readonly = 0 + * + * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< + * info.format = self.format + * else: + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":203 + * + * if flags & PyBUF_FORMAT: + * info.format = self.format # <<<<<<<<<<<<<< + * else: + * info.format = NULL + */ + __pyx_t_4 = __pyx_v_self->format; + __pyx_v_info->format = __pyx_t_4; + + /* "View.MemoryView":202 + * info.readonly = 0 + * + * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< + * info.format = self.format + * else: + */ + goto __pyx_L5; + } + + /* "View.MemoryView":205 + * info.format = self.format + * else: + * info.format = NULL # <<<<<<<<<<<<<< + * + * info.obj = self + */ + /*else*/ { + __pyx_v_info->format = NULL; + } + __pyx_L5:; + + /* "View.MemoryView":207 + * info.format = NULL + * + * info.obj = self # <<<<<<<<<<<<<< + * + * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") + */ + __Pyx_INCREF(((PyObject *)__pyx_v_self)); + __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); + __pyx_v_info->obj = ((PyObject *)__pyx_v_self); + + /* "View.MemoryView":185 + * + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< + * cdef int bufmode = -1 + * if self.mode == u"c": + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + if (__pyx_v_info->obj != NULL) { + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; + } + goto __pyx_L2; + __pyx_L0:; + if (__pyx_v_info->obj == Py_None) { + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; + } + __pyx_L2:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":211 + * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") + * + * def __dealloc__(array self): # <<<<<<<<<<<<<< + * if self.callback_free_data != NULL: + * self.callback_free_data(self.data) + */ + +/* Python wrapper */ +static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ +static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { + __Pyx_RefNannyDeclarations + 
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); + __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("__dealloc__", 0); + + /* "View.MemoryView":212 + * + * def __dealloc__(array self): + * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< + * self.callback_free_data(self.data) + * elif self.free_data: + */ + __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":213 + * def __dealloc__(array self): + * if self.callback_free_data != NULL: + * self.callback_free_data(self.data) # <<<<<<<<<<<<<< + * elif self.free_data: + * if self.dtype_is_object: + */ + __pyx_v_self->callback_free_data(__pyx_v_self->data); + + /* "View.MemoryView":212 + * + * def __dealloc__(array self): + * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< + * self.callback_free_data(self.data) + * elif self.free_data: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":214 + * if self.callback_free_data != NULL: + * self.callback_free_data(self.data) + * elif self.free_data: # <<<<<<<<<<<<<< + * if self.dtype_is_object: + * refcount_objects_in_slice(self.data, self._shape, + */ + __pyx_t_1 = (__pyx_v_self->free_data != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":215 + * self.callback_free_data(self.data) + * elif self.free_data: + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * refcount_objects_in_slice(self.data, self._shape, + * self._strides, self.ndim, False) + */ + __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":216 + * elif self.free_data: + * if self.dtype_is_object: + * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< + * self._strides, self.ndim, False) + * free(self.data) + */ + __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); + + /* "View.MemoryView":215 + * self.callback_free_data(self.data) + * elif self.free_data: + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * refcount_objects_in_slice(self.data, self._shape, + * self._strides, self.ndim, False) + */ + } + + /* "View.MemoryView":218 + * refcount_objects_in_slice(self.data, self._shape, + * self._strides, self.ndim, False) + * free(self.data) # <<<<<<<<<<<<<< + * PyObject_Free(self._shape) + * + */ + free(__pyx_v_self->data); + + /* "View.MemoryView":214 + * if self.callback_free_data != NULL: + * self.callback_free_data(self.data) + * elif self.free_data: # <<<<<<<<<<<<<< + * if self.dtype_is_object: + * refcount_objects_in_slice(self.data, self._shape, + */ + } + __pyx_L3:; + + /* "View.MemoryView":219 + * self._strides, self.ndim, False) + * free(self.data) + * PyObject_Free(self._shape) # <<<<<<<<<<<<<< + * + * @property + */ + PyObject_Free(__pyx_v_self->_shape); + + /* "View.MemoryView":211 + * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") + * + * def __dealloc__(array self): # <<<<<<<<<<<<<< + * if self.callback_free_data != NULL: + * self.callback_free_data(self.data) + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "View.MemoryView":222 + * + * @property + * def memview(self): # <<<<<<<<<<<<<< + * return self.get_memview() + * + */ + +/* Python wrapper */ +static 
PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":223 + * @property + * def memview(self): + * return self.get_memview() # <<<<<<<<<<<<<< + * + * @cname('get_memview') + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 223, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "View.MemoryView":222 + * + * @property + * def memview(self): # <<<<<<<<<<<<<< + * return self.get_memview() + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":226 + * + * @cname('get_memview') + * cdef get_memview(self): # <<<<<<<<<<<<<< + * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE + * return memoryview(self, flags, self.dtype_is_object) + */ + +static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { + int __pyx_v_flags; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("get_memview", 0); + + /* "View.MemoryView":227 + * @cname('get_memview') + * cdef get_memview(self): + * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< + * return memoryview(self, flags, self.dtype_is_object) + * + */ + __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); + + /* "View.MemoryView":228 + * cdef get_memview(self): + * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE + * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< + * + * def __len__(self): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 228, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 228, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(((PyObject *)__pyx_v_self)); + __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); + PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_2); + PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); + 
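+
+/* Annotation (not Cython output): this `get_memview` asks for
+ * PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE, and `__getbuffer__`
+ * above rejects any request whose flags lack the contiguity bit matching
+ * `self.mode` ("Can only create a buffer that is contiguous in memory.").
+ * The same C/Fortran contiguity distinction is observable from pure Python;
+ * a sketch using numpy, which is assumed available and is not part of this
+ * module:
+ *
+ * import numpy as np
+ * a = np.arange(12, dtype=np.int64).reshape(3, 4)  # C-ordered buffer
+ * m = memoryview(a)
+ * assert m.c_contiguous and not m.f_contiguous
+ * mt = memoryview(a.T)                             # Fortran-ordered view
+ * assert mt.f_contiguous and not mt.c_contiguous
+ */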
__pyx_t_1 = 0; + __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":226 + * + * @cname('get_memview') + * cdef get_memview(self): # <<<<<<<<<<<<<< + * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE + * return memoryview(self, flags, self.dtype_is_object) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":230 + * return memoryview(self, flags, self.dtype_is_object) + * + * def __len__(self): # <<<<<<<<<<<<<< + * return self._shape[0] + * + */ + +/* Python wrapper */ +static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/ +static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) { + Py_ssize_t __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); + __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) { + Py_ssize_t __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__len__", 0); + + /* "View.MemoryView":231 + * + * def __len__(self): + * return self._shape[0] # <<<<<<<<<<<<<< + * + * def __getattr__(self, attr): + */ + __pyx_r = (__pyx_v_self->_shape[0]); + goto __pyx_L0; + + /* "View.MemoryView":230 + * return memoryview(self, flags, self.dtype_is_object) + * + * def __len__(self): # <<<<<<<<<<<<<< + * return self._shape[0] + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":233 + * return self._shape[0] + * + * def __getattr__(self, attr): # <<<<<<<<<<<<<< + * return getattr(self.memview, attr) + * + */ + +/* Python wrapper */ +static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ +static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); + __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__getattr__", 0); + + /* "View.MemoryView":234 + * + * def __getattr__(self, attr): + * return getattr(self.memview, attr) # <<<<<<<<<<<<<< + * + * def __getitem__(self, item): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), 
__pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 234, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 234, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":233 + * return self._shape[0] + * + * def __getattr__(self, attr): # <<<<<<<<<<<<<< + * return getattr(self.memview, attr) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":236 + * return getattr(self.memview, attr) + * + * def __getitem__(self, item): # <<<<<<<<<<<<<< + * return self.memview[item] + * + */ + +/* Python wrapper */ +static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ +static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); + __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__getitem__", 0); + + /* "View.MemoryView":237 + * + * def __getitem__(self, item): + * return self.memview[item] # <<<<<<<<<<<<<< + * + * def __setitem__(self, item, value): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 237, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 237, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":236 + * return getattr(self.memview, attr) + * + * def __getitem__(self, item): # <<<<<<<<<<<<<< + * return self.memview[item] + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":239 + * return self.memview[item] + * + * def __setitem__(self, item, value): # <<<<<<<<<<<<<< + * self.memview[item] = value + * + */ + +/* Python wrapper */ +static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ +static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); + 
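+
+/* Annotation (not Cython output): `__len__`, `__getattr__`, `__getitem__`,
+ * and the `__setitem__` completed just below all forward to the memoryview
+ * the array wraps, exactly as the echoed .pyx lines show. A pure-Python
+ * facade with the same shape; the class name is illustrative only:
+ *
+ * class ArrayFacade:
+ *     def __init__(self, data, shape):
+ *         self._shape = shape
+ *         self.memview = memoryview(data)
+ *     def __len__(self):
+ *         return self._shape[0]
+ *     def __getattr__(self, attr):        # only hit for missing attributes
+ *         return getattr(self.memview, attr)
+ *     def __getitem__(self, item):
+ *         return self.memview[item]
+ *     def __setitem__(self, item, value):
+ *         self.memview[item] = value
+ *
+ * e.g. ArrayFacade(bytearray(4), (4,))[0] == 0.
+ */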
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setitem__", 0); + + /* "View.MemoryView":240 + * + * def __setitem__(self, item, value): + * self.memview[item] = value # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 240, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 240, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "View.MemoryView":239 + * return self.memview[item] + * + * def __setitem__(self, item, value): # <<<<<<<<<<<<<< + * self.memview[item] = value + * + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__reduce_cython__", 0); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 2, __pyx_L1_error) + + /* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", 
__pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ +static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setstate_cython__", 0); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 4, __pyx_L1_error) + + /* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":244 + * + * @cname("__pyx_array_new") + * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< + * char *mode, char *buf): + * cdef array result + */ + +static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) { + struct __pyx_array_obj *__pyx_v_result = 0; + struct __pyx_array_obj *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("array_cwrapper", 0); + + /* "View.MemoryView":248 + * cdef array result + * + * if buf == NULL: # <<<<<<<<<<<<<< + * result = array(shape, itemsize, format, mode.decode('ASCII')) + * else: + */ + __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":249 + * + * if buf == 
NULL: + * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< + * else: + * result = array(shape, itemsize, format, mode.decode('ASCII'), + */ + __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_INCREF(__pyx_v_shape); + __Pyx_GIVEREF(__pyx_v_shape); + PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); + __Pyx_GIVEREF(__pyx_t_2); + PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); + __pyx_t_2 = 0; + __pyx_t_3 = 0; + __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); + __pyx_t_4 = 0; + + /* "View.MemoryView":248 + * cdef array result + * + * if buf == NULL: # <<<<<<<<<<<<<< + * result = array(shape, itemsize, format, mode.decode('ASCII')) + * else: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":251 + * result = array(shape, itemsize, format, mode.decode('ASCII')) + * else: + * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< + * allocate_buffer=False) + * result.data = buf + */ + /*else*/ { + __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 251, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 251, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_INCREF(__pyx_v_shape); + __Pyx_GIVEREF(__pyx_v_shape); + PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); + __Pyx_GIVEREF(__pyx_t_5); + PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); + __pyx_t_4 = 0; + __pyx_t_5 = 0; + __pyx_t_3 = 0; + + /* "View.MemoryView":252 + * else: + * result = array(shape, itemsize, format, mode.decode('ASCII'), + * allocate_buffer=False) # <<<<<<<<<<<<<< + * result.data = buf + * + */ + __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 252, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 252, __pyx_L1_error) + + /* "View.MemoryView":251 + * result = array(shape, itemsize, format, mode.decode('ASCII')) + * else: + * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< + * allocate_buffer=False) + * result.data = buf + */ + __pyx_t_5 = 
__Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); + __pyx_t_5 = 0; + + /* "View.MemoryView":253 + * result = array(shape, itemsize, format, mode.decode('ASCII'), + * allocate_buffer=False) + * result.data = buf # <<<<<<<<<<<<<< + * + * return result + */ + __pyx_v_result->data = __pyx_v_buf; + } + __pyx_L3:; + + /* "View.MemoryView":255 + * result.data = buf + * + * return result # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(((PyObject *)__pyx_r)); + __Pyx_INCREF(((PyObject *)__pyx_v_result)); + __pyx_r = __pyx_v_result; + goto __pyx_L0; + + /* "View.MemoryView":244 + * + * @cname("__pyx_array_new") + * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< + * char *mode, char *buf): + * cdef array result + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_result); + __Pyx_XGIVEREF((PyObject *)__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":281 + * cdef class Enum(object): + * cdef object name + * def __init__(self, name): # <<<<<<<<<<<<<< + * self.name = name + * def __repr__(self): + */ + +/* Python wrapper */ +static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_name = 0; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; + PyObject* values[1] = {0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 281, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + } + __pyx_v_name = values[0]; + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 281, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return -1; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); + + /* function 
exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__init__", 0); + + /* "View.MemoryView":282 + * cdef object name + * def __init__(self, name): + * self.name = name # <<<<<<<<<<<<<< + * def __repr__(self): + * return self.name + */ + __Pyx_INCREF(__pyx_v_name); + __Pyx_GIVEREF(__pyx_v_name); + __Pyx_GOTREF(__pyx_v_self->name); + __Pyx_DECREF(__pyx_v_self->name); + __pyx_v_self->name = __pyx_v_name; + + /* "View.MemoryView":281 + * cdef class Enum(object): + * cdef object name + * def __init__(self, name): # <<<<<<<<<<<<<< + * self.name = name + * def __repr__(self): + */ + + /* function exit code */ + __pyx_r = 0; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":283 + * def __init__(self, name): + * self.name = name + * def __repr__(self): # <<<<<<<<<<<<<< + * return self.name + * + */ + +/* Python wrapper */ +static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); + __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__repr__", 0); + + /* "View.MemoryView":284 + * self.name = name + * def __repr__(self): + * return self.name # <<<<<<<<<<<<<< + * + * cdef generic = Enum("") + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_self->name); + __pyx_r = __pyx_v_self->name; + goto __pyx_L0; + + /* "View.MemoryView":283 + * def __init__(self, name): + * self.name = name + * def __repr__(self): # <<<<<<<<<<<<<< + * return self.name + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * cdef tuple state + * cdef object _dict + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { + PyObject *__pyx_v_state = 0; + PyObject *__pyx_v__dict = 0; + int __pyx_v_use_setstate; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_t_2; + int __pyx_t_3; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__reduce_cython__", 
0); + + /* "(tree fragment)":5 + * cdef object _dict + * cdef bint use_setstate + * state = (self.name,) # <<<<<<<<<<<<<< + * _dict = getattr(self, '__dict__', None) + * if _dict is not None: + */ + __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(__pyx_v_self->name); + __Pyx_GIVEREF(__pyx_v_self->name); + PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name); + __pyx_v_state = ((PyObject*)__pyx_t_1); + __pyx_t_1 = 0; + + /* "(tree fragment)":6 + * cdef bint use_setstate + * state = (self.name,) + * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< + * if _dict is not None: + * state += (_dict,) + */ + __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v__dict = __pyx_t_1; + __pyx_t_1 = 0; + + /* "(tree fragment)":7 + * state = (self.name,) + * _dict = getattr(self, '__dict__', None) + * if _dict is not None: # <<<<<<<<<<<<<< + * state += (_dict,) + * use_setstate = True + */ + __pyx_t_2 = (__pyx_v__dict != Py_None); + __pyx_t_3 = (__pyx_t_2 != 0); + if (__pyx_t_3) { + + /* "(tree fragment)":8 + * _dict = getattr(self, '__dict__', None) + * if _dict is not None: + * state += (_dict,) # <<<<<<<<<<<<<< + * use_setstate = True + * else: + */ + __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(__pyx_v__dict); + __Pyx_GIVEREF(__pyx_v__dict); + PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); + __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4)); + __pyx_t_4 = 0; + + /* "(tree fragment)":9 + * if _dict is not None: + * state += (_dict,) + * use_setstate = True # <<<<<<<<<<<<<< + * else: + * use_setstate = self.name is not None + */ + __pyx_v_use_setstate = 1; + + /* "(tree fragment)":7 + * state = (self.name,) + * _dict = getattr(self, '__dict__', None) + * if _dict is not None: # <<<<<<<<<<<<<< + * state += (_dict,) + * use_setstate = True + */ + goto __pyx_L3; + } + + /* "(tree fragment)":11 + * use_setstate = True + * else: + * use_setstate = self.name is not None # <<<<<<<<<<<<<< + * if use_setstate: + * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state + */ + /*else*/ { + __pyx_t_3 = (__pyx_v_self->name != Py_None); + __pyx_v_use_setstate = __pyx_t_3; + } + __pyx_L3:; + + /* "(tree fragment)":12 + * else: + * use_setstate = self.name is not None + * if use_setstate: # <<<<<<<<<<<<<< + * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state + * else: + */ + __pyx_t_3 = (__pyx_v_use_setstate != 0); + if (__pyx_t_3) { + + /* "(tree fragment)":13 + * use_setstate = self.name is not None + * if use_setstate: + * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<< + * else: + * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); + __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); + 
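+
+/* Annotation (not Cython output): the pickling helper that this block
+ * expands, reassembled from the echoed tree-fragment source for
+ * readability. 0xb068931 is the layout checksum Cython embeds so that
+ * unpickling can verify the type's fields still match; the two return
+ * shapes decide whether `__setstate__` runs:
+ *
+ * def __reduce_cython__(self):
+ *     state = (self.name,)
+ *     _dict = getattr(self, '__dict__', None)
+ *     if _dict is not None:
+ *         state += (_dict,)
+ *         use_setstate = True
+ *     else:
+ *         use_setstate = self.name is not None
+ *     if use_setstate:
+ *         return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
+ *     else:
+ *         return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
+ */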
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); + __Pyx_INCREF(__pyx_int_184977713); + __Pyx_GIVEREF(__pyx_int_184977713); + PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); + __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(Py_None); + PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); + __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); + __Pyx_INCREF(__pyx_v_state); + __Pyx_GIVEREF(__pyx_v_state); + PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state); + __pyx_t_4 = 0; + __pyx_t_1 = 0; + __pyx_r = __pyx_t_5; + __pyx_t_5 = 0; + goto __pyx_L0; + + /* "(tree fragment)":12 + * else: + * use_setstate = self.name is not None + * if use_setstate: # <<<<<<<<<<<<<< + * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state + * else: + */ + } + + /* "(tree fragment)":15 + * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state + * else: + * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * __pyx_unpickle_Enum__set_state(self, __pyx_state) + */ + /*else*/ { + __Pyx_XDECREF(__pyx_r); + __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); + __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); + PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); + __Pyx_INCREF(__pyx_int_184977713); + __Pyx_GIVEREF(__pyx_int_184977713); + PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); + __Pyx_INCREF(__pyx_v_state); + __Pyx_GIVEREF(__pyx_v_state); + PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); + __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_5); + PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); + __pyx_t_5 = 0; + __pyx_t_1 = 0; + __pyx_r = __pyx_t_4; + __pyx_t_4 = 0; + goto __pyx_L0; + } + + /* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * cdef tuple state + * cdef object _dict + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_state); + __Pyx_XDECREF(__pyx_v__dict); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":16 + * else: + * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * __pyx_unpickle_Enum__set_state(self, __pyx_state) + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ +static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setstate_cython__ 
(wrapper)", 0); + __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setstate_cython__", 0); + + /* "(tree fragment)":17 + * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) + * def __setstate_cython__(self, __pyx_state): + * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<< + */ + if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error) + __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "(tree fragment)":16 + * else: + * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * __pyx_unpickle_Enum__set_state(self, __pyx_state) + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":298 + * + * @cname('__pyx_align_pointer') + * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< + * "Align pointer memory on a given boundary" + * cdef Py_intptr_t aligned_p = memory + */ + +static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { + Py_intptr_t __pyx_v_aligned_p; + size_t __pyx_v_offset; + void *__pyx_r; + int __pyx_t_1; + + /* "View.MemoryView":300 + * cdef void *align_pointer(void *memory, size_t alignment) nogil: + * "Align pointer memory on a given boundary" + * cdef Py_intptr_t aligned_p = memory # <<<<<<<<<<<<<< + * cdef size_t offset + * + */ + __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); + + /* "View.MemoryView":304 + * + * with cython.cdivision(True): + * offset = aligned_p % alignment # <<<<<<<<<<<<<< + * + * if offset > 0: + */ + __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); + + /* "View.MemoryView":306 + * offset = aligned_p % alignment + * + * if offset > 0: # <<<<<<<<<<<<<< + * aligned_p += alignment - offset + * + */ + __pyx_t_1 = ((__pyx_v_offset > 0) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":307 + * + * if offset > 0: + * aligned_p += alignment - offset # <<<<<<<<<<<<<< + * + * return aligned_p + */ + __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); + + /* "View.MemoryView":306 + * offset = aligned_p % alignment + * + * if offset > 0: # <<<<<<<<<<<<<< + * aligned_p += alignment - offset + * + */ + } + + /* "View.MemoryView":309 + * aligned_p += alignment - offset + * + * return aligned_p # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = ((void *)__pyx_v_aligned_p); + goto __pyx_L0; + 
+ /* "View.MemoryView":298 + * + * @cname('__pyx_align_pointer') + * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< + * "Align pointer memory on a given boundary" + * cdef Py_intptr_t aligned_p = memory + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":345 + * cdef __Pyx_TypeInfo *typeinfo + * + * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< + * self.obj = obj + * self.flags = flags + */ + +/* Python wrapper */ +static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_obj = 0; + int __pyx_v_flags; + int __pyx_v_dtype_is_object; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; + PyObject* values[3] = {0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 345, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object); + if (value) { values[2] = value; kw_args--; } + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 345, __pyx_L3_error) + } + } else { + switch (PyTuple_GET_SIZE(__pyx_args)) { + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + break; + default: goto __pyx_L5_argtuple_error; + } + } + __pyx_v_obj = values[0]; + __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) + if (values[2]) { + __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) + } else { + __pyx_v_dtype_is_object = ((int)0); + } + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 345, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return -1; + __pyx_L4_argument_unpacking_done:; + __pyx_r = 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__cinit__", 0); + + /* "View.MemoryView":346 + * + * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): + * self.obj = obj # <<<<<<<<<<<<<< + * self.flags = flags + * if type(self) is memoryview or obj is not None: + */ + __Pyx_INCREF(__pyx_v_obj); + __Pyx_GIVEREF(__pyx_v_obj); + __Pyx_GOTREF(__pyx_v_self->obj); + __Pyx_DECREF(__pyx_v_self->obj); + __pyx_v_self->obj = __pyx_v_obj; + + /* "View.MemoryView":347 + * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): + * self.obj = obj + * self.flags = flags # <<<<<<<<<<<<<< + * if type(self) is memoryview or obj is not None: + * __Pyx_GetBuffer(obj, &self.view, flags) + */ + __pyx_v_self->flags = __pyx_v_flags; + + /* "View.MemoryView":348 + * self.obj = obj + * self.flags = flags + * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< + * __Pyx_GetBuffer(obj, &self.view, flags) + * if self.view.obj == NULL: + */ + __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); + __pyx_t_3 = (__pyx_t_2 != 0); + if (!__pyx_t_3) { + } else { + __pyx_t_1 = __pyx_t_3; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_3 = (__pyx_v_obj != Py_None); + __pyx_t_2 = (__pyx_t_3 != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L4_bool_binop_done:; + if (__pyx_t_1) { + + /* "View.MemoryView":349 + * self.flags = flags + * if type(self) is memoryview or obj is not None: + * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< + * if self.view.obj == NULL: + * (<__pyx_buffer *> &self.view).obj = Py_None + */ + __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 349, __pyx_L1_error) + + /* "View.MemoryView":350 + * if type(self) is memoryview or obj is not None: + * __Pyx_GetBuffer(obj, &self.view, flags) + * if self.view.obj == NULL: # <<<<<<<<<<<<<< + * (<__pyx_buffer *> &self.view).obj = Py_None + * Py_INCREF(Py_None) + */ + __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":351 + * __Pyx_GetBuffer(obj, &self.view, flags) + * if self.view.obj == NULL: + * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< + * Py_INCREF(Py_None) + * + */ + ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; + + /* "View.MemoryView":352 + * if self.view.obj == NULL: + * (<__pyx_buffer *> &self.view).obj = Py_None + * Py_INCREF(Py_None) # <<<<<<<<<<<<<< + * + * global __pyx_memoryview_thread_locks_used + */ + Py_INCREF(Py_None); + + /* "View.MemoryView":350 + * if type(self) is memoryview or obj is not None: + * __Pyx_GetBuffer(obj, &self.view, flags) + * if self.view.obj == NULL: # <<<<<<<<<<<<<< + * (<__pyx_buffer *> &self.view).obj = Py_None + * Py_INCREF(Py_None) + */ + } + + /* "View.MemoryView":348 + * self.obj = obj + * self.flags = flags + * 
if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< + * __Pyx_GetBuffer(obj, &self.view, flags) + * if self.view.obj == NULL: + */ + } + + /* "View.MemoryView":355 + * + * global __pyx_memoryview_thread_locks_used + * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] + * __pyx_memoryview_thread_locks_used += 1 + */ + __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":356 + * global __pyx_memoryview_thread_locks_used + * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks_used += 1 + * if self.lock is NULL: + */ + __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); + + /* "View.MemoryView":357 + * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] + * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< + * if self.lock is NULL: + * self.lock = PyThread_allocate_lock() + */ + __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); + + /* "View.MemoryView":355 + * + * global __pyx_memoryview_thread_locks_used + * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] + * __pyx_memoryview_thread_locks_used += 1 + */ + } + + /* "View.MemoryView":358 + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] + * __pyx_memoryview_thread_locks_used += 1 + * if self.lock is NULL: # <<<<<<<<<<<<<< + * self.lock = PyThread_allocate_lock() + * if self.lock is NULL: + */ + __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":359 + * __pyx_memoryview_thread_locks_used += 1 + * if self.lock is NULL: + * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< + * if self.lock is NULL: + * raise MemoryError + */ + __pyx_v_self->lock = PyThread_allocate_lock(); + + /* "View.MemoryView":360 + * if self.lock is NULL: + * self.lock = PyThread_allocate_lock() + * if self.lock is NULL: # <<<<<<<<<<<<<< + * raise MemoryError + * + */ + __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":361 + * self.lock = PyThread_allocate_lock() + * if self.lock is NULL: + * raise MemoryError # <<<<<<<<<<<<<< + * + * if flags & PyBUF_FORMAT: + */ + PyErr_NoMemory(); __PYX_ERR(1, 361, __pyx_L1_error) + + /* "View.MemoryView":360 + * if self.lock is NULL: + * self.lock = PyThread_allocate_lock() + * if self.lock is NULL: # <<<<<<<<<<<<<< + * raise MemoryError + * + */ + } + + /* "View.MemoryView":358 + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] + * __pyx_memoryview_thread_locks_used += 1 + * if self.lock is NULL: # <<<<<<<<<<<<<< + * self.lock = PyThread_allocate_lock() + * if self.lock is NULL: + */ + } + + /* "View.MemoryView":363 + * raise MemoryError + * + * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< + * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') + * else: + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":364 + * + * if flags & PyBUF_FORMAT: + * self.dtype_is_object = (self.view.format[0] == 
b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< + * else: + * self.dtype_is_object = dtype_is_object + */ + __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L11_bool_binop_done; + } + __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L11_bool_binop_done:; + __pyx_v_self->dtype_is_object = __pyx_t_1; + + /* "View.MemoryView":363 + * raise MemoryError + * + * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< + * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') + * else: + */ + goto __pyx_L10; + } + + /* "View.MemoryView":366 + * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') + * else: + * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< + * + * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( + */ + /*else*/ { + __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; + } + __pyx_L10:; + + /* "View.MemoryView":368 + * self.dtype_is_object = dtype_is_object + * + * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< + * &self.acquisition_count[0], sizeof(__pyx_atomic_int)) + * self.typeinfo = NULL + */ + __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); + + /* "View.MemoryView":370 + * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( + * &self.acquisition_count[0], sizeof(__pyx_atomic_int)) + * self.typeinfo = NULL # <<<<<<<<<<<<<< + * + * def __dealloc__(memoryview self): + */ + __pyx_v_self->typeinfo = NULL; + + /* "View.MemoryView":345 + * cdef __Pyx_TypeInfo *typeinfo + * + * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< + * self.obj = obj + * self.flags = flags + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":372 + * self.typeinfo = NULL + * + * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< + * if self.obj is not None: + * __Pyx_ReleaseBuffer(&self.view) + */ + +/* Python wrapper */ +static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ +static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); + __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { + int __pyx_v_i; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + int __pyx_t_5; + PyThread_type_lock __pyx_t_6; + PyThread_type_lock __pyx_t_7; + __Pyx_RefNannySetupContext("__dealloc__", 0); + + /* "View.MemoryView":373 + * + * def __dealloc__(memoryview self): + * if self.obj is not None: # <<<<<<<<<<<<<< + * __Pyx_ReleaseBuffer(&self.view) + * elif (<__pyx_buffer *> &self.view).obj == Py_None: + */ + __pyx_t_1 = (__pyx_v_self->obj != Py_None); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* 
"View.MemoryView":374 + * def __dealloc__(memoryview self): + * if self.obj is not None: + * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< + * elif (<__pyx_buffer *> &self.view).obj == Py_None: + * + */ + __Pyx_ReleaseBuffer((&__pyx_v_self->view)); + + /* "View.MemoryView":373 + * + * def __dealloc__(memoryview self): + * if self.obj is not None: # <<<<<<<<<<<<<< + * __Pyx_ReleaseBuffer(&self.view) + * elif (<__pyx_buffer *> &self.view).obj == Py_None: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":375 + * if self.obj is not None: + * __Pyx_ReleaseBuffer(&self.view) + * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< + * + * (<__pyx_buffer *> &self.view).obj = NULL + */ + __pyx_t_2 = ((((Py_buffer *)(&__pyx_v_self->view))->obj == Py_None) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":377 + * elif (<__pyx_buffer *> &self.view).obj == Py_None: + * + * (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<< + * Py_DECREF(Py_None) + * + */ + ((Py_buffer *)(&__pyx_v_self->view))->obj = NULL; + + /* "View.MemoryView":378 + * + * (<__pyx_buffer *> &self.view).obj = NULL + * Py_DECREF(Py_None) # <<<<<<<<<<<<<< + * + * cdef int i + */ + Py_DECREF(Py_None); + + /* "View.MemoryView":375 + * if self.obj is not None: + * __Pyx_ReleaseBuffer(&self.view) + * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< + * + * (<__pyx_buffer *> &self.view).obj = NULL + */ + } + __pyx_L3:; + + /* "View.MemoryView":382 + * cdef int i + * global __pyx_memoryview_thread_locks_used + * if self.lock != NULL: # <<<<<<<<<<<<<< + * for i in range(__pyx_memoryview_thread_locks_used): + * if __pyx_memoryview_thread_locks[i] is self.lock: + */ + __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":383 + * global __pyx_memoryview_thread_locks_used + * if self.lock != NULL: + * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< + * if __pyx_memoryview_thread_locks[i] is self.lock: + * __pyx_memoryview_thread_locks_used -= 1 + */ + __pyx_t_3 = __pyx_memoryview_thread_locks_used; + __pyx_t_4 = __pyx_t_3; + for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { + __pyx_v_i = __pyx_t_5; + + /* "View.MemoryView":384 + * if self.lock != NULL: + * for i in range(__pyx_memoryview_thread_locks_used): + * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks_used -= 1 + * if i != __pyx_memoryview_thread_locks_used: + */ + __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":385 + * for i in range(__pyx_memoryview_thread_locks_used): + * if __pyx_memoryview_thread_locks[i] is self.lock: + * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< + * if i != __pyx_memoryview_thread_locks_used: + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( + */ + __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1); + + /* "View.MemoryView":386 + * if __pyx_memoryview_thread_locks[i] is self.lock: + * __pyx_memoryview_thread_locks_used -= 1 + * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( + * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) + */ + __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":388 + 
* if i != __pyx_memoryview_thread_locks_used: + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( + * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< + * break + * else: + */ + __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); + __pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]); + + /* "View.MemoryView":387 + * __pyx_memoryview_thread_locks_used -= 1 + * if i != __pyx_memoryview_thread_locks_used: + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) + * break + */ + (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6; + (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7; + + /* "View.MemoryView":386 + * if __pyx_memoryview_thread_locks[i] is self.lock: + * __pyx_memoryview_thread_locks_used -= 1 + * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( + * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) + */ + } + + /* "View.MemoryView":389 + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( + * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) + * break # <<<<<<<<<<<<<< + * else: + * PyThread_free_lock(self.lock) + */ + goto __pyx_L6_break; + + /* "View.MemoryView":384 + * if self.lock != NULL: + * for i in range(__pyx_memoryview_thread_locks_used): + * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks_used -= 1 + * if i != __pyx_memoryview_thread_locks_used: + */ + } + } + /*else*/ { + + /* "View.MemoryView":391 + * break + * else: + * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< + * + * cdef char *get_item_pointer(memoryview self, object index) except NULL: + */ + PyThread_free_lock(__pyx_v_self->lock); + } + __pyx_L6_break:; + + /* "View.MemoryView":382 + * cdef int i + * global __pyx_memoryview_thread_locks_used + * if self.lock != NULL: # <<<<<<<<<<<<<< + * for i in range(__pyx_memoryview_thread_locks_used): + * if __pyx_memoryview_thread_locks[i] is self.lock: + */ + } + + /* "View.MemoryView":372 + * self.typeinfo = NULL + * + * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< + * if self.obj is not None: + * __Pyx_ReleaseBuffer(&self.view) + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "View.MemoryView":393 + * PyThread_free_lock(self.lock) + * + * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< + * cdef Py_ssize_t dim + * cdef char *itemp = self.view.buf + */ + +static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { + Py_ssize_t __pyx_v_dim; + char *__pyx_v_itemp; + PyObject *__pyx_v_idx = NULL; + char *__pyx_r; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + Py_ssize_t __pyx_t_3; + PyObject *(*__pyx_t_4)(PyObject *); + PyObject *__pyx_t_5 = NULL; + Py_ssize_t __pyx_t_6; + char *__pyx_t_7; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("get_item_pointer", 0); 
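+ /* The lock bookkeeping above amounts to a small object pool: __cinit__ hands
+  * out one of THREAD_LOCKS_PREALLOCATED (8) preallocated locks while any are
+  * free, and __dealloc__ returns a pooled lock by swapping it into the last
+  * in-use slot before shrinking the count; locks allocated past the pool are
+  * freed outright. A minimal Python sketch of that swap-with-last scheme
+  * (hypothetical names, threading.Lock standing in for PyThread locks):
+  *
+  *     import threading
+  *
+  *     pool = [threading.Lock() for _ in range(8)]
+  *     used = 0
+  *
+  *     def take():
+  *         global used
+  *         if used < len(pool):
+  *             used += 1
+  *             return pool[used - 1]
+  *         return threading.Lock()          # past the pool: allocate fresh
+  *
+  *     def give_back(lock):
+  *         global used
+  *         for i in range(used):
+  *             if pool[i] is lock:          # pooled: swap into last used slot
+  *                 used -= 1
+  *                 pool[i], pool[used] = pool[used], pool[i]
+  *                 return
+  *         # not pooled: nothing to recycle (the C code frees it instead)
+  */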
+ + /* "View.MemoryView":395 + * cdef char *get_item_pointer(memoryview self, object index) except NULL: + * cdef Py_ssize_t dim + * cdef char *itemp = self.view.buf # <<<<<<<<<<<<<< + * + * for dim, idx in enumerate(index): + */ + __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); + + /* "View.MemoryView":397 + * cdef char *itemp = self.view.buf + * + * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< + * itemp = pybuffer_index(&self.view, itemp, idx, dim) + * + */ + __pyx_t_1 = 0; + if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { + __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; + __pyx_t_4 = NULL; + } else { + __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 397, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 397, __pyx_L1_error) + } + for (;;) { + if (likely(!__pyx_t_4)) { + if (likely(PyList_CheckExact(__pyx_t_2))) { + if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) + #else + __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + #endif + } else { + if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) + #else + __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + #endif + } + } else { + __pyx_t_5 = __pyx_t_4(__pyx_t_2); + if (unlikely(!__pyx_t_5)) { + PyObject* exc_type = PyErr_Occurred(); + if (exc_type) { + if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); + else __PYX_ERR(1, 397, __pyx_L1_error) + } + break; + } + __Pyx_GOTREF(__pyx_t_5); + } + __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); + __pyx_t_5 = 0; + __pyx_v_dim = __pyx_t_1; + __pyx_t_1 = (__pyx_t_1 + 1); + + /* "View.MemoryView":398 + * + * for dim, idx in enumerate(index): + * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< + * + * return itemp + */ + __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 398, __pyx_L1_error) + __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 398, __pyx_L1_error) + __pyx_v_itemp = __pyx_t_7; + + /* "View.MemoryView":397 + * cdef char *itemp = self.view.buf + * + * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< + * itemp = pybuffer_index(&self.view, itemp, idx, dim) + * + */ + } + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "View.MemoryView":400 + * itemp = pybuffer_index(&self.view, itemp, idx, dim) + * + * return itemp # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = __pyx_v_itemp; + goto __pyx_L0; + + /* "View.MemoryView":393 + * PyThread_free_lock(self.lock) + * + * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< + * cdef Py_ssize_t dim + * cdef char *itemp = self.view.buf + */ + + /* function exit code */ + 
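+ /* For a direct buffer, the per-dimension pybuffer_index calls above reduce
+  * to the usual strides arithmetic. A minimal Python sketch of that offset
+  * computation (hypothetical helper; the real pybuffer_index also handles
+  * suboffsets for indirect buffers):
+  *
+  *     def item_offset(index, shape, strides):
+  *         offset = 0
+  *         for idx, extent, stride in zip(index, shape, strides):
+  *             if idx < 0:
+  *                 idx += extent               # wrap negative indices
+  *             if not 0 <= idx < extent:
+  *                 raise IndexError("index out of bounds")
+  *             offset += idx * stride          # bytes into the buffer
+  *         return offset
+  *
+  *     assert item_offset((1, 2), shape=(3, 4), strides=(32, 8)) == 48
+  */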
__pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_idx); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":403 + * + * + * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< + * if index is Ellipsis: + * return self + */ + +/* Python wrapper */ +static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ +static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { + PyObject *__pyx_v_have_slices = NULL; + PyObject *__pyx_v_indices = NULL; + char *__pyx_v_itemp; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + char *__pyx_t_6; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__getitem__", 0); + + /* "View.MemoryView":404 + * + * def __getitem__(memoryview self, object index): + * if index is Ellipsis: # <<<<<<<<<<<<<< + * return self + * + */ + __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":405 + * def __getitem__(memoryview self, object index): + * if index is Ellipsis: + * return self # <<<<<<<<<<<<<< + * + * have_slices, indices = _unellipsify(index, self.view.ndim) + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_self)); + __pyx_r = ((PyObject *)__pyx_v_self); + goto __pyx_L0; + + /* "View.MemoryView":404 + * + * def __getitem__(memoryview self, object index): + * if index is Ellipsis: # <<<<<<<<<<<<<< + * return self + * + */ + } + + /* "View.MemoryView":407 + * return self + * + * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< + * + * cdef char *itemp + */ + __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 407, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + if (likely(__pyx_t_3 != Py_None)) { + PyObject* sequence = __pyx_t_3; + Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); + if (unlikely(size != 2)) { + if (size > 2) __Pyx_RaiseTooManyValuesError(2); + else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); + __PYX_ERR(1, 407, __pyx_L1_error) + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); + __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); + __Pyx_INCREF(__pyx_t_4); + __Pyx_INCREF(__pyx_t_5); + #else + __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 407, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 407, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + #endif + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } else { + 
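+ /* _unellipsify normalizes the index before the unpack above: it wraps a
+  * bare index in a tuple, expands `...` into the right number of full
+  * slices, and reports whether any slices are present (the None guard here
+  * is defensive unpacking only). A rough Python sketch of that
+  * normalization (hypothetical helper; the real one also pads missing
+  * trailing dimensions):
+  *
+  *     def unellipsify(index, ndim):
+  *         tup = index if isinstance(index, tuple) else (index,)
+  *         if Ellipsis in tup:
+  *             i = tup.index(Ellipsis)
+  *             fill = (slice(None),) * (ndim - len(tup) + 1)
+  *             tup = tup[:i] + fill + tup[i + 1:]
+  *         have_slices = any(isinstance(x, slice) for x in tup)
+  *         return have_slices, tup
+  *
+  *     assert unellipsify(Ellipsis, 2) == (True, (slice(None), slice(None)))
+  */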
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 407, __pyx_L1_error) + } + __pyx_v_have_slices = __pyx_t_4; + __pyx_t_4 = 0; + __pyx_v_indices = __pyx_t_5; + __pyx_t_5 = 0; + + /* "View.MemoryView":410 + * + * cdef char *itemp + * if have_slices: # <<<<<<<<<<<<<< + * return memview_slice(self, indices) + * else: + */ + __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 410, __pyx_L1_error) + if (__pyx_t_2) { + + /* "View.MemoryView":411 + * cdef char *itemp + * if have_slices: + * return memview_slice(self, indices) # <<<<<<<<<<<<<< + * else: + * itemp = self.get_item_pointer(indices) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "View.MemoryView":410 + * + * cdef char *itemp + * if have_slices: # <<<<<<<<<<<<<< + * return memview_slice(self, indices) + * else: + */ + } + + /* "View.MemoryView":413 + * return memview_slice(self, indices) + * else: + * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< + * return self.convert_item_to_object(itemp) + * + */ + /*else*/ { + __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 413, __pyx_L1_error) + __pyx_v_itemp = __pyx_t_6; + + /* "View.MemoryView":414 + * else: + * itemp = self.get_item_pointer(indices) + * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< + * + * def __setitem__(memoryview self, object index, object value): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 414, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + } + + /* "View.MemoryView":403 + * + * + * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< + * if index is Ellipsis: + * return self + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_have_slices); + __Pyx_XDECREF(__pyx_v_indices); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":416 + * return self.convert_item_to_object(itemp) + * + * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< + * if self.view.readonly: + * raise TypeError("Cannot assign to read-only memoryview") + */ + +/* Python wrapper */ +static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ +static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { + PyObject *__pyx_v_have_slices = NULL; + PyObject *__pyx_v_obj = NULL; + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setitem__", 0); + __Pyx_INCREF(__pyx_v_index); + + /* "View.MemoryView":417 + * + * def __setitem__(memoryview self, object index, object value): + * if self.view.readonly: # <<<<<<<<<<<<<< + * raise TypeError("Cannot assign to read-only memoryview") + * + */ + __pyx_t_1 = (__pyx_v_self->view.readonly != 0); + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":418 + * def __setitem__(memoryview self, object index, object value): + * if self.view.readonly: + * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< + * + * have_slices, index = _unellipsify(index, self.view.ndim) + */ + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 418, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(1, 418, __pyx_L1_error) + + /* "View.MemoryView":417 + * + * def __setitem__(memoryview self, object index, object value): + * if self.view.readonly: # <<<<<<<<<<<<<< + * raise TypeError("Cannot assign to read-only memoryview") + * + */ + } + + /* "View.MemoryView":420 + * raise TypeError("Cannot assign to read-only memoryview") + * + * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< + * + * if have_slices: + */ + __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 420, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (likely(__pyx_t_2 != Py_None)) { + PyObject* sequence = __pyx_t_2; + Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); + if (unlikely(size != 2)) { + if (size > 2) __Pyx_RaiseTooManyValuesError(2); + else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); + __PYX_ERR(1, 420, __pyx_L1_error) + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); + __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(__pyx_t_4); + #else + __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 420, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 420, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + #endif + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } else { + __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 420, __pyx_L1_error) + } + __pyx_v_have_slices = __pyx_t_3; + __pyx_t_3 = 0; + __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4); + __pyx_t_4 = 0; + + /* "View.MemoryView":422 + * have_slices, index = _unellipsify(index, self.view.ndim) + * + * if have_slices: # <<<<<<<<<<<<<< + * obj = self.is_slice(value) + * if obj: + */ + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 422, __pyx_L1_error) + if (__pyx_t_1) { + + /* "View.MemoryView":423 + * + * if have_slices: + * obj = self.is_slice(value) # <<<<<<<<<<<<<< + * if obj: + * self.setitem_slice_assignment(self[index], obj) + */ + __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview 
*)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 423, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_v_obj = __pyx_t_2; + __pyx_t_2 = 0; + + /* "View.MemoryView":424 + * if have_slices: + * obj = self.is_slice(value) + * if obj: # <<<<<<<<<<<<<< + * self.setitem_slice_assignment(self[index], obj) + * else: + */ + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 424, __pyx_L1_error) + if (__pyx_t_1) { + + /* "View.MemoryView":425 + * obj = self.is_slice(value) + * if obj: + * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< + * else: + * self.setitem_slice_assign_scalar(self[index], value) + */ + __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 425, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "View.MemoryView":424 + * if have_slices: + * obj = self.is_slice(value) + * if obj: # <<<<<<<<<<<<<< + * self.setitem_slice_assignment(self[index], obj) + * else: + */ + goto __pyx_L5; + } + + /* "View.MemoryView":427 + * self.setitem_slice_assignment(self[index], obj) + * else: + * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< + * else: + * self.setitem_indexed(index, value) + */ + /*else*/ { + __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 427, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 427, __pyx_L1_error) + __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 427, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } + __pyx_L5:; + + /* "View.MemoryView":422 + * have_slices, index = _unellipsify(index, self.view.ndim) + * + * if have_slices: # <<<<<<<<<<<<<< + * obj = self.is_slice(value) + * if obj: + */ + goto __pyx_L4; + } + + /* "View.MemoryView":429 + * self.setitem_slice_assign_scalar(self[index], value) + * else: + * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< + * + * cdef is_slice(self, obj): + */ + /*else*/ { + __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 429, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } + __pyx_L4:; + + /* "View.MemoryView":416 + * return self.convert_item_to_object(itemp) + * + * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< + * if self.view.readonly: + * raise TypeError("Cannot assign to read-only memoryview") + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; 
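+ /* __setitem__ above dispatches on the normalized index: a slice target
+  * takes either the slice-to-slice copy path (setitem_slice_assignment) or
+  * the scalar-broadcast path (setitem_slice_assign_scalar), while a purely
+  * integral index stores a single element (setitem_indexed). The first and
+  * last of these are also visible on Python's built-in memoryview, as a
+  * quick illustration:
+  *
+  *     ba = bytearray(b"abcdef")
+  *     mv = memoryview(ba)
+  *     mv[0] = ord("A")      # indexed store of one element
+  *     mv[1:4] = b"XYZ"      # element-wise copy into a slice
+  *     assert bytes(ba) == b"AXYZef"
+  */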
+ __Pyx_XDECREF(__pyx_v_have_slices); + __Pyx_XDECREF(__pyx_v_obj); + __Pyx_XDECREF(__pyx_v_index); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":431 + * self.setitem_indexed(index, value) + * + * cdef is_slice(self, obj): # <<<<<<<<<<<<<< + * if not isinstance(obj, memoryview): + * try: + */ + +static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + int __pyx_t_9; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("is_slice", 0); + __Pyx_INCREF(__pyx_v_obj); + + /* "View.MemoryView":432 + * + * cdef is_slice(self, obj): + * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< + * try: + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + */ + __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); + __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":433 + * cdef is_slice(self, obj): + * if not isinstance(obj, memoryview): + * try: # <<<<<<<<<<<<<< + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + * self.dtype_is_object) + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); + __Pyx_XGOTREF(__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_4); + __Pyx_XGOTREF(__pyx_t_5); + /*try:*/ { + + /* "View.MemoryView":434 + * if not isinstance(obj, memoryview): + * try: + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< + * self.dtype_is_object) + * except TypeError: + */ + __pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 434, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_6); + + /* "View.MemoryView":435 + * try: + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + * self.dtype_is_object) # <<<<<<<<<<<<<< + * except TypeError: + * return None + */ + __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 435, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_7); + + /* "View.MemoryView":434 + * if not isinstance(obj, memoryview): + * try: + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< + * self.dtype_is_object) + * except TypeError: + */ + __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 434, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_INCREF(__pyx_v_obj); + __Pyx_GIVEREF(__pyx_v_obj); + PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); + __Pyx_GIVEREF(__pyx_t_7); + PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); + __pyx_t_6 = 0; + __pyx_t_7 = 0; + __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 434, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); + __pyx_t_7 = 0; + + /* "View.MemoryView":433 + * cdef is_slice(self, obj): + * if not isinstance(obj, memoryview): + * try: # <<<<<<<<<<<<<< + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | 
PyBUF_ANY_CONTIGUOUS, + * self.dtype_is_object) + */ + } + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + goto __pyx_L9_try_end; + __pyx_L4_error:; + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; + + /* "View.MemoryView":436 + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + * self.dtype_is_object) + * except TypeError: # <<<<<<<<<<<<<< + * return None + * + */ + __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); + if (__pyx_t_9) { + __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 436, __pyx_L6_except_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_GOTREF(__pyx_t_8); + __Pyx_GOTREF(__pyx_t_6); + + /* "View.MemoryView":437 + * self.dtype_is_object) + * except TypeError: + * return None # <<<<<<<<<<<<<< + * + * return obj + */ + __Pyx_XDECREF(__pyx_r); + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + goto __pyx_L7_except_return; + } + goto __pyx_L6_except_error; + __pyx_L6_except_error:; + + /* "View.MemoryView":433 + * cdef is_slice(self, obj): + * if not isinstance(obj, memoryview): + * try: # <<<<<<<<<<<<<< + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + * self.dtype_is_object) + */ + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_XGIVEREF(__pyx_t_5); + __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); + goto __pyx_L1_error; + __pyx_L7_except_return:; + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_XGIVEREF(__pyx_t_5); + __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); + goto __pyx_L0; + __pyx_L9_try_end:; + } + + /* "View.MemoryView":432 + * + * cdef is_slice(self, obj): + * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< + * try: + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + */ + } + + /* "View.MemoryView":439 + * return None + * + * return obj # <<<<<<<<<<<<<< + * + * cdef setitem_slice_assignment(self, dst, src): + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_obj); + __pyx_r = __pyx_v_obj; + goto __pyx_L0; + + /* "View.MemoryView":431 + * self.setitem_indexed(index, value) + * + * cdef is_slice(self, obj): # <<<<<<<<<<<<<< + * if not isinstance(obj, memoryview): + * try: + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_obj); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":441 + * return obj + * + * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice dst_slice + * cdef __Pyx_memviewslice src_slice + */ + +static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { + __Pyx_memviewslice __pyx_v_dst_slice; + __Pyx_memviewslice __pyx_v_src_slice; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_memviewslice *__pyx_t_1; + __Pyx_memviewslice *__pyx_t_2; + PyObject *__pyx_t_3 
= NULL; + int __pyx_t_4; + int __pyx_t_5; + int __pyx_t_6; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); + + /* "View.MemoryView":445 + * cdef __Pyx_memviewslice src_slice + * + * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< + * get_slice_from_memview(dst, &dst_slice)[0], + * src.ndim, dst.ndim, self.dtype_is_object) + */ + if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 445, __pyx_L1_error) + __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 445, __pyx_L1_error) + + /* "View.MemoryView":446 + * + * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], + * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< + * src.ndim, dst.ndim, self.dtype_is_object) + * + */ + if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 446, __pyx_L1_error) + __pyx_t_2 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 446, __pyx_L1_error) + + /* "View.MemoryView":447 + * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], + * get_slice_from_memview(dst, &dst_slice)[0], + * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< + * + * cdef setitem_slice_assign_scalar(self, memoryview dst, value): + */ + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "View.MemoryView":445 + * cdef __Pyx_memviewslice src_slice + * + * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< + * get_slice_from_memview(dst, &dst_slice)[0], + * src.ndim, dst.ndim, self.dtype_is_object) + */ + __pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 445, __pyx_L1_error) + + /* "View.MemoryView":441 + * return obj + * + * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice dst_slice + * cdef __Pyx_memviewslice src_slice + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":449 + * src.ndim, dst.ndim, self.dtype_is_object) + * + * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # 
<<<<<<<<<<<< + * cdef int array[128] + * cdef void *tmp = NULL + */ + +static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { + int __pyx_v_array[0x80]; + void *__pyx_v_tmp; + void *__pyx_v_item; + __Pyx_memviewslice *__pyx_v_dst_slice; + __Pyx_memviewslice __pyx_v_tmp_slice; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_memviewslice *__pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + int __pyx_t_5; + char const *__pyx_t_6; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + PyObject *__pyx_t_9 = NULL; + PyObject *__pyx_t_10 = NULL; + PyObject *__pyx_t_11 = NULL; + PyObject *__pyx_t_12 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); + + /* "View.MemoryView":451 + * cdef setitem_slice_assign_scalar(self, memoryview dst, value): + * cdef int array[128] + * cdef void *tmp = NULL # <<<<<<<<<<<<<< + * cdef void *item + * + */ + __pyx_v_tmp = NULL; + + /* "View.MemoryView":456 + * cdef __Pyx_memviewslice *dst_slice + * cdef __Pyx_memviewslice tmp_slice + * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< + * + * if self.view.itemsize > sizeof(array): + */ + __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 456, __pyx_L1_error) + __pyx_v_dst_slice = __pyx_t_1; + + /* "View.MemoryView":458 + * dst_slice = get_slice_from_memview(dst, &tmp_slice) + * + * if self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< + * tmp = PyMem_Malloc(self.view.itemsize) + * if tmp == NULL: + */ + __pyx_t_2 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":459 + * + * if self.view.itemsize > sizeof(array): + * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< + * if tmp == NULL: + * raise MemoryError + */ + __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); + + /* "View.MemoryView":460 + * if self.view.itemsize > sizeof(array): + * tmp = PyMem_Malloc(self.view.itemsize) + * if tmp == NULL: # <<<<<<<<<<<<<< + * raise MemoryError + * item = tmp + */ + __pyx_t_2 = ((__pyx_v_tmp == NULL) != 0); + if (unlikely(__pyx_t_2)) { + + /* "View.MemoryView":461 + * tmp = PyMem_Malloc(self.view.itemsize) + * if tmp == NULL: + * raise MemoryError # <<<<<<<<<<<<<< + * item = tmp + * else: + */ + PyErr_NoMemory(); __PYX_ERR(1, 461, __pyx_L1_error) + + /* "View.MemoryView":460 + * if self.view.itemsize > sizeof(array): + * tmp = PyMem_Malloc(self.view.itemsize) + * if tmp == NULL: # <<<<<<<<<<<<<< + * raise MemoryError + * item = tmp + */ + } + + /* "View.MemoryView":462 + * if tmp == NULL: + * raise MemoryError + * item = tmp # <<<<<<<<<<<<<< + * else: + * item = array + */ + __pyx_v_item = __pyx_v_tmp; + + /* "View.MemoryView":458 + * dst_slice = get_slice_from_memview(dst, &tmp_slice) + * + * if self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< + * tmp = PyMem_Malloc(self.view.itemsize) + * if tmp == NULL: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":464 + * item = tmp + * else: + * item = array # <<<<<<<<<<<<<< + * + * try: + */ + /*else*/ { + __pyx_v_item = ((void *)__pyx_v_array); + } + __pyx_L3:; + + /* "View.MemoryView":466 + * item = array + * + * try: # <<<<<<<<<<<<<< + * if self.dtype_is_object: + * (<PyObject **> item)[0] = value + */ + 
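+ /* The try/finally below converts the scalar to raw item bytes exactly once
+  * (into the stack array, or into a heap block when the itemsize exceeds the
+  * 128-int scratch space) and then fans those bytes out across every element
+  * of the destination slice. A minimal Python sketch of that
+  * pack-once-copy-many idea (the struct module standing in for
+  * assign_item_from_object):
+  *
+  *     import struct
+  *
+  *     fmt = "d"                             # one double per element
+  *     itemsize = struct.calcsize(fmt)
+  *     item = struct.pack(fmt, 3.14)         # pack the scalar once...
+  *     buf = bytearray(itemsize * 5)
+  *     for k in range(5):                    # ...then copy it into each slot
+  *         buf[k * itemsize:(k + 1) * itemsize] = item
+  *     assert struct.unpack("5d", buf) == (3.14,) * 5
+  */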
/*try:*/ { + + /* "View.MemoryView":467 + * + * try: + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * (<PyObject **> item)[0] = value + * else: + */ + __pyx_t_2 = (__pyx_v_self->dtype_is_object != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":468 + * try: + * if self.dtype_is_object: + * (<PyObject **> item)[0] = value # <<<<<<<<<<<<<< + * else: + * self.assign_item_from_object(<char *> item, value) + */ + (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); + + /* "View.MemoryView":467 + * + * try: + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * (<PyObject **> item)[0] = value + * else: + */ + goto __pyx_L8; + } + + /* "View.MemoryView":470 + * (<PyObject **> item)[0] = value + * else: + * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< + * + * + */ + /*else*/ { + __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 470, __pyx_L6_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __pyx_L8:; + + /* "View.MemoryView":474 + * + * + * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< + * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) + * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, + */ + __pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":475 + * + * if self.view.suboffsets != NULL: + * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< + * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, + * item, self.dtype_is_object) + */ + __pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 475, __pyx_L6_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "View.MemoryView":474 + * + * + * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< + * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) + * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, + */ + } + + /* "View.MemoryView":476 + * if self.view.suboffsets != NULL: + * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) + * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< + * item, self.dtype_is_object) + * finally: + */ + __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); + } + + /* "View.MemoryView":479 + * item, self.dtype_is_object) + * finally: + * PyMem_Free(tmp) # <<<<<<<<<<<<<< + * + * cdef setitem_indexed(self, index, value): + */ + /*finally:*/ { + /*normal exit:*/{ + PyMem_Free(__pyx_v_tmp); + goto __pyx_L7; + } + __pyx_L6_error:; + /*exception exit:*/{ + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); + if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9); + __Pyx_XGOTREF(__pyx_t_7); + __Pyx_XGOTREF(__pyx_t_8); + __Pyx_XGOTREF(__pyx_t_9); + __Pyx_XGOTREF(__pyx_t_10); + __Pyx_XGOTREF(__pyx_t_11); + __Pyx_XGOTREF(__pyx_t_12); + __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename; + { + PyMem_Free(__pyx_v_tmp); + } + if 
(PY_MAJOR_VERSION >= 3) { + __Pyx_XGIVEREF(__pyx_t_10); + __Pyx_XGIVEREF(__pyx_t_11); + __Pyx_XGIVEREF(__pyx_t_12); + __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12); + } + __Pyx_XGIVEREF(__pyx_t_7); + __Pyx_XGIVEREF(__pyx_t_8); + __Pyx_XGIVEREF(__pyx_t_9); + __Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9); + __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; + __pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6; + goto __pyx_L1_error; + } + __pyx_L7:; + } + + /* "View.MemoryView":449 + * src.ndim, dst.ndim, self.dtype_is_object) + * + * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< + * cdef int array[128] + * cdef void *tmp = NULL + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":481 + * PyMem_Free(tmp) + * + * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< + * cdef char *itemp = self.get_item_pointer(index) + * self.assign_item_from_object(itemp, value) + */ + +static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { + char *__pyx_v_itemp; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + char *__pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("setitem_indexed", 0); + + /* "View.MemoryView":482 + * + * cdef setitem_indexed(self, index, value): + * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< + * self.assign_item_from_object(itemp, value) + * + */ + __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 482, __pyx_L1_error) + __pyx_v_itemp = __pyx_t_1; + + /* "View.MemoryView":483 + * cdef setitem_indexed(self, index, value): + * cdef char *itemp = self.get_item_pointer(index) + * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< + * + * cdef convert_item_to_object(self, char *itemp): + */ + __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 483, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "View.MemoryView":481 + * PyMem_Free(tmp) + * + * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< + * cdef char *itemp = self.get_item_pointer(index) + * self.assign_item_from_object(itemp, value) + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":485 + * self.assign_item_from_object(itemp, value) + * + * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to 
convert the type""" + */ + +static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { + PyObject *__pyx_v_struct = NULL; + PyObject *__pyx_v_bytesitem = 0; + PyObject *__pyx_v_result = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + int __pyx_t_8; + PyObject *__pyx_t_9 = NULL; + size_t __pyx_t_10; + int __pyx_t_11; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("convert_item_to_object", 0); + + /* "View.MemoryView":488 + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to convert the type""" + * import struct # <<<<<<<<<<<<<< + * cdef bytes bytesitem + * + */ + __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 488, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_struct = __pyx_t_1; + __pyx_t_1 = 0; + + /* "View.MemoryView":491 + * cdef bytes bytesitem + * + * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< + * try: + * result = struct.unpack(self.view.format, bytesitem) + */ + __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 491, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); + __pyx_t_1 = 0; + + /* "View.MemoryView":492 + * + * bytesitem = itemp[:self.view.itemsize] + * try: # <<<<<<<<<<<<<< + * result = struct.unpack(self.view.format, bytesitem) + * except struct.error: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_4); + /*try:*/ { + + /* "View.MemoryView":493 + * bytesitem = itemp[:self.view.itemsize] + * try: + * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< + * except struct.error: + * raise ValueError("Unable to convert item to object") + */ + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 493, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 493, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_7 = NULL; + __pyx_t_8 = 0; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { + __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); + if (likely(__pyx_t_7)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); + __Pyx_INCREF(__pyx_t_7); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_5, function); + __pyx_t_8 = 1; + } + } + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(__pyx_t_5)) { + PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; + __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + } else + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { + PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; + __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + } else + #endif + { + __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 493, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_9); + if (__pyx_t_7) { + __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; + } + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); + __Pyx_INCREF(__pyx_v_bytesitem); + __Pyx_GIVEREF(__pyx_v_bytesitem); + PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); + __pyx_t_6 = 0; + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 493, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + } + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_v_result = __pyx_t_1; + __pyx_t_1 = 0; + + /* "View.MemoryView":492 + * + * bytesitem = itemp[:self.view.itemsize] + * try: # <<<<<<<<<<<<<< + * result = struct.unpack(self.view.format, bytesitem) + * except struct.error: + */ + } + + /* "View.MemoryView":497 + * raise ValueError("Unable to convert item to object") + * else: + * if len(self.view.format) == 1: # <<<<<<<<<<<<<< + * return result[0] + * return result + */ + /*else:*/ { + __pyx_t_10 = strlen(__pyx_v_self->view.format); + __pyx_t_11 = ((__pyx_t_10 == 1) != 0); + if (__pyx_t_11) { + + /* "View.MemoryView":498 + * else: + * if len(self.view.format) == 1: + * return result[0] # <<<<<<<<<<<<<< + * return result + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 498, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L6_except_return; + + /* "View.MemoryView":497 + * raise ValueError("Unable to convert item to object") + * else: + * if len(self.view.format) == 1: # <<<<<<<<<<<<<< + * return result[0] + * return result + */ + } + + /* "View.MemoryView":499 + * if len(self.view.format) == 1: + * return result[0] + * return result # <<<<<<<<<<<<<< + * + * cdef assign_item_from_object(self, char *itemp, object value): + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_result); + __pyx_r = __pyx_v_result; + goto __pyx_L6_except_return; + } + __pyx_L3_error:; + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; + + /* "View.MemoryView":494 + * try: + * result = struct.unpack(self.view.format, bytesitem) + * except struct.error: # <<<<<<<<<<<<<< + * raise ValueError("Unable to convert item to object") + * else: + */ + __Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9); + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 494, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9); + __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0; + if (__pyx_t_8) { + __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 494, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_9); + 
__Pyx_GOTREF(__pyx_t_5); + __Pyx_GOTREF(__pyx_t_1); + + /* "View.MemoryView":495 + * result = struct.unpack(self.view.format, bytesitem) + * except struct.error: + * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< + * else: + * if len(self.view.format) == 1: + */ + __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 495, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_Raise(__pyx_t_6, 0, 0, 0); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __PYX_ERR(1, 495, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + __pyx_L5_except_error:; + + /* "View.MemoryView":492 + * + * bytesitem = itemp[:self.view.itemsize] + * try: # <<<<<<<<<<<<<< + * result = struct.unpack(self.view.format, bytesitem) + * except struct.error: + */ + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L1_error; + __pyx_L6_except_return:; + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L0; + } + + /* "View.MemoryView":485 + * self.assign_item_from_object(itemp, value) + * + * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to convert the type""" + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_9); + __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_struct); + __Pyx_XDECREF(__pyx_v_bytesitem); + __Pyx_XDECREF(__pyx_v_result); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":501 + * return result + * + * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to convert the type""" + */ + +static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { + PyObject *__pyx_v_struct = NULL; + char __pyx_v_c; + PyObject *__pyx_v_bytesvalue = 0; + Py_ssize_t __pyx_v_i; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_t_2; + int __pyx_t_3; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + int __pyx_t_7; + PyObject *__pyx_t_8 = NULL; + Py_ssize_t __pyx_t_9; + PyObject *__pyx_t_10 = NULL; + char *__pyx_t_11; + char *__pyx_t_12; + char *__pyx_t_13; + char *__pyx_t_14; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("assign_item_from_object", 0); + + /* "View.MemoryView":504 + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to convert the type""" + * import struct # <<<<<<<<<<<<<< + * cdef char c + * cdef bytes bytesvalue + */ + __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 504, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_struct = __pyx_t_1; + __pyx_t_1 = 0; + + /* "View.MemoryView":509 + * cdef Py_ssize_t i + * + * if isinstance(value, tuple): # <<<<<<<<<<<<<< + * 
bytesvalue = struct.pack(self.view.format, *value) + * else: + */ + __pyx_t_2 = PyTuple_Check(__pyx_v_value); + __pyx_t_3 = (__pyx_t_2 != 0); + if (__pyx_t_3) { + + /* "View.MemoryView":510 + * + * if isinstance(value, tuple): + * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< + * else: + * bytesvalue = struct.pack(self.view.format, value) + */ + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 510, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 510, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); + __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 510, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 510, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 510, __pyx_L1_error) + __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); + __pyx_t_4 = 0; + + /* "View.MemoryView":509 + * cdef Py_ssize_t i + * + * if isinstance(value, tuple): # <<<<<<<<<<<<<< + * bytesvalue = struct.pack(self.view.format, *value) + * else: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":512 + * bytesvalue = struct.pack(self.view.format, *value) + * else: + * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< + * + * for i, c in enumerate(bytesvalue): + */ + /*else*/ { + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 512, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 512, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_5 = NULL; + __pyx_t_7 = 0; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { + __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); + if (likely(__pyx_t_5)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); + __Pyx_INCREF(__pyx_t_5); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_6, function); + __pyx_t_7 = 1; + } + } + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(__pyx_t_6)) { + PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; + __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + } else + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { + PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; + __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) + 
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + } else + #endif + { + __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 512, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + if (__pyx_t_5) { + __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; + } + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); + __Pyx_INCREF(__pyx_v_value); + __Pyx_GIVEREF(__pyx_v_value); + PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); + __pyx_t_1 = 0; + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 512, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + } + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 512, __pyx_L1_error) + __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); + __pyx_t_4 = 0; + } + __pyx_L3:; + + /* "View.MemoryView":514 + * bytesvalue = struct.pack(self.view.format, value) + * + * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< + * itemp[i] = c + * + */ + __pyx_t_9 = 0; + if (unlikely(__pyx_v_bytesvalue == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); + __PYX_ERR(1, 514, __pyx_L1_error) + } + __Pyx_INCREF(__pyx_v_bytesvalue); + __pyx_t_10 = __pyx_v_bytesvalue; + __pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10); + __pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10)); + for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) { + __pyx_t_11 = __pyx_t_14; + __pyx_v_c = (__pyx_t_11[0]); + + /* "View.MemoryView":515 + * + * for i, c in enumerate(bytesvalue): + * itemp[i] = c # <<<<<<<<<<<<<< + * + * @cname('getbuffer') + */ + __pyx_v_i = __pyx_t_9; + + /* "View.MemoryView":514 + * bytesvalue = struct.pack(self.view.format, value) + * + * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< + * itemp[i] = c + * + */ + __pyx_t_9 = (__pyx_t_9 + 1); + + /* "View.MemoryView":515 + * + * for i, c in enumerate(bytesvalue): + * itemp[i] = c # <<<<<<<<<<<<<< + * + * @cname('getbuffer') + */ + (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; + } + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + + /* "View.MemoryView":501 + * return result + * + * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to convert the type""" + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_XDECREF(__pyx_t_10); + __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_struct); + __Pyx_XDECREF(__pyx_v_bytesvalue); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":518 + * + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< + * if flags & PyBUF_WRITABLE and self.view.readonly: + * raise ValueError("Cannot create writable memory view from read-only memoryview") + */ + +/* Python wrapper */ +static CYTHON_UNUSED int 
__pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ +static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + Py_ssize_t *__pyx_t_4; + char *__pyx_t_5; + void *__pyx_t_6; + int __pyx_t_7; + Py_ssize_t __pyx_t_8; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + if (__pyx_v_info == NULL) { + PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); + return -1; + } + __Pyx_RefNannySetupContext("__getbuffer__", 0); + __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(__pyx_v_info->obj); + + /* "View.MemoryView":519 + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): + * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< + * raise ValueError("Cannot create writable memory view from read-only memoryview") + * + */ + __pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_2 = (__pyx_v_self->view.readonly != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L4_bool_binop_done:; + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":520 + * def __getbuffer__(self, Py_buffer *info, int flags): + * if flags & PyBUF_WRITABLE and self.view.readonly: + * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< + * + * if flags & PyBUF_ND: + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 520, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 520, __pyx_L1_error) + + /* "View.MemoryView":519 + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): + * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< + * raise ValueError("Cannot create writable memory view from read-only memoryview") + * + */ + } + + /* "View.MemoryView":522 + * raise ValueError("Cannot create writable memory view from read-only memoryview") + * + * if flags & PyBUF_ND: # <<<<<<<<<<<<<< + * info.shape = self.view.shape + * else: + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":523 + * + * if flags & PyBUF_ND: + * info.shape = self.view.shape # <<<<<<<<<<<<<< + * else: + * info.shape = NULL + */ + __pyx_t_4 = __pyx_v_self->view.shape; + __pyx_v_info->shape = __pyx_t_4; + + /* "View.MemoryView":522 + * raise ValueError("Cannot create writable memory view from read-only memoryview") + * + * if flags & PyBUF_ND: # <<<<<<<<<<<<<< + * info.shape = self.view.shape + * else: + */ + goto __pyx_L6; + } + + /* "View.MemoryView":525 + * info.shape = self.view.shape + * else: + * 
info.shape = NULL # <<<<<<<<<<<<<< + * + * if flags & PyBUF_STRIDES: + */ + /*else*/ { + __pyx_v_info->shape = NULL; + } + __pyx_L6:; + + /* "View.MemoryView":527 + * info.shape = NULL + * + * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< + * info.strides = self.view.strides + * else: + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":528 + * + * if flags & PyBUF_STRIDES: + * info.strides = self.view.strides # <<<<<<<<<<<<<< + * else: + * info.strides = NULL + */ + __pyx_t_4 = __pyx_v_self->view.strides; + __pyx_v_info->strides = __pyx_t_4; + + /* "View.MemoryView":527 + * info.shape = NULL + * + * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< + * info.strides = self.view.strides + * else: + */ + goto __pyx_L7; + } + + /* "View.MemoryView":530 + * info.strides = self.view.strides + * else: + * info.strides = NULL # <<<<<<<<<<<<<< + * + * if flags & PyBUF_INDIRECT: + */ + /*else*/ { + __pyx_v_info->strides = NULL; + } + __pyx_L7:; + + /* "View.MemoryView":532 + * info.strides = NULL + * + * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< + * info.suboffsets = self.view.suboffsets + * else: + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":533 + * + * if flags & PyBUF_INDIRECT: + * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< + * else: + * info.suboffsets = NULL + */ + __pyx_t_4 = __pyx_v_self->view.suboffsets; + __pyx_v_info->suboffsets = __pyx_t_4; + + /* "View.MemoryView":532 + * info.strides = NULL + * + * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< + * info.suboffsets = self.view.suboffsets + * else: + */ + goto __pyx_L8; + } + + /* "View.MemoryView":535 + * info.suboffsets = self.view.suboffsets + * else: + * info.suboffsets = NULL # <<<<<<<<<<<<<< + * + * if flags & PyBUF_FORMAT: + */ + /*else*/ { + __pyx_v_info->suboffsets = NULL; + } + __pyx_L8:; + + /* "View.MemoryView":537 + * info.suboffsets = NULL + * + * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< + * info.format = self.view.format + * else: + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":538 + * + * if flags & PyBUF_FORMAT: + * info.format = self.view.format # <<<<<<<<<<<<<< + * else: + * info.format = NULL + */ + __pyx_t_5 = __pyx_v_self->view.format; + __pyx_v_info->format = __pyx_t_5; + + /* "View.MemoryView":537 + * info.suboffsets = NULL + * + * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< + * info.format = self.view.format + * else: + */ + goto __pyx_L9; + } + + /* "View.MemoryView":540 + * info.format = self.view.format + * else: + * info.format = NULL # <<<<<<<<<<<<<< + * + * info.buf = self.view.buf + */ + /*else*/ { + __pyx_v_info->format = NULL; + } + __pyx_L9:; + + /* "View.MemoryView":542 + * info.format = NULL + * + * info.buf = self.view.buf # <<<<<<<<<<<<<< + * info.ndim = self.view.ndim + * info.itemsize = self.view.itemsize + */ + __pyx_t_6 = __pyx_v_self->view.buf; + __pyx_v_info->buf = __pyx_t_6; + + /* "View.MemoryView":543 + * + * info.buf = self.view.buf + * info.ndim = self.view.ndim # <<<<<<<<<<<<<< + * info.itemsize = self.view.itemsize + * info.len = self.view.len + */ + __pyx_t_7 = __pyx_v_self->view.ndim; + __pyx_v_info->ndim = __pyx_t_7; + + /* "View.MemoryView":544 + * info.buf = self.view.buf + * info.ndim = self.view.ndim + * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< + * info.len = self.view.len + * info.readonly = self.view.readonly + */ + __pyx_t_8 = __pyx_v_self->view.itemsize; + __pyx_v_info->itemsize = __pyx_t_8; + + /* 
"View.MemoryView":545 + * info.ndim = self.view.ndim + * info.itemsize = self.view.itemsize + * info.len = self.view.len # <<<<<<<<<<<<<< + * info.readonly = self.view.readonly + * info.obj = self + */ + __pyx_t_8 = __pyx_v_self->view.len; + __pyx_v_info->len = __pyx_t_8; + + /* "View.MemoryView":546 + * info.itemsize = self.view.itemsize + * info.len = self.view.len + * info.readonly = self.view.readonly # <<<<<<<<<<<<<< + * info.obj = self + * + */ + __pyx_t_1 = __pyx_v_self->view.readonly; + __pyx_v_info->readonly = __pyx_t_1; + + /* "View.MemoryView":547 + * info.len = self.view.len + * info.readonly = self.view.readonly + * info.obj = self # <<<<<<<<<<<<<< + * + * __pyx_getbuffer = capsule( &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") + */ + __Pyx_INCREF(((PyObject *)__pyx_v_self)); + __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); + __pyx_v_info->obj = ((PyObject *)__pyx_v_self); + + /* "View.MemoryView":518 + * + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< + * if flags & PyBUF_WRITABLE and self.view.readonly: + * raise ValueError("Cannot create writable memory view from read-only memoryview") + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + if (__pyx_v_info->obj != NULL) { + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; + } + goto __pyx_L2; + __pyx_L0:; + if (__pyx_v_info->obj == Py_None) { + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; + } + __pyx_L2:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":553 + * + * @property + * def T(self): # <<<<<<<<<<<<<< + * cdef _memoryviewslice result = memoryview_copy(self) + * transpose_memslice(&result.from_slice) + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_t_2; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":554 + * @property + * def T(self): + * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< + * transpose_memslice(&result.from_slice) + * return result + */ + __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 554, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 554, __pyx_L1_error) + __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); + __pyx_t_1 
= 0; + + /* "View.MemoryView":555 + * def T(self): + * cdef _memoryviewslice result = memoryview_copy(self) + * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< + * return result + * + */ + __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 555, __pyx_L1_error) + + /* "View.MemoryView":556 + * cdef _memoryviewslice result = memoryview_copy(self) + * transpose_memslice(&result.from_slice) + * return result # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_result)); + __pyx_r = ((PyObject *)__pyx_v_result); + goto __pyx_L0; + + /* "View.MemoryView":553 + * + * @property + * def T(self): # <<<<<<<<<<<<<< + * cdef _memoryviewslice result = memoryview_copy(self) + * transpose_memslice(&result.from_slice) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_result); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":559 + * + * @property + * def base(self): # <<<<<<<<<<<<<< + * return self.obj + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":560 + * @property + * def base(self): + * return self.obj # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_self->obj); + __pyx_r = __pyx_v_self->obj; + goto __pyx_L0; + + /* "View.MemoryView":559 + * + * @property + * def base(self): # <<<<<<<<<<<<<< + * return self.obj + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":563 + * + * @property + * def shape(self): # <<<<<<<<<<<<<< + * return tuple([length for length in self.view.shape[:self.view.ndim]]) + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + Py_ssize_t __pyx_v_length; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + Py_ssize_t *__pyx_t_2; + Py_ssize_t *__pyx_t_3; + Py_ssize_t 
*__pyx_t_4; + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":564 + * @property + * def shape(self): + * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 564, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); + for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { + __pyx_t_2 = __pyx_t_4; + __pyx_v_length = (__pyx_t_2[0]); + __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 564, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + } + __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 564, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_r = __pyx_t_5; + __pyx_t_5 = 0; + goto __pyx_L0; + + /* "View.MemoryView":563 + * + * @property + * def shape(self): # <<<<<<<<<<<<<< + * return tuple([length for length in self.view.shape[:self.view.ndim]]) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":567 + * + * @property + * def strides(self): # <<<<<<<<<<<<<< + * if self.view.strides == NULL: + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + Py_ssize_t __pyx_v_stride; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + Py_ssize_t *__pyx_t_3; + Py_ssize_t *__pyx_t_4; + Py_ssize_t *__pyx_t_5; + PyObject *__pyx_t_6 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":568 + * @property + * def strides(self): + * if self.view.strides == NULL: # <<<<<<<<<<<<<< + * + * raise ValueError("Buffer view does not expose strides") + */ + __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":570 + * if self.view.strides == NULL: + * + * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< + * + * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) + */ + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 570, __pyx_L1_error) + 
__Pyx_GOTREF(__pyx_t_2); + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(1, 570, __pyx_L1_error) + + /* "View.MemoryView":568 + * @property + * def strides(self): + * if self.view.strides == NULL: # <<<<<<<<<<<<<< + * + * raise ValueError("Buffer view does not expose strides") + */ + } + + /* "View.MemoryView":572 + * raise ValueError("Buffer view does not expose strides") + * + * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 572, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); + for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { + __pyx_t_3 = __pyx_t_5; + __pyx_v_stride = (__pyx_t_3[0]); + __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 572, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + } + __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 572, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_r = __pyx_t_6; + __pyx_t_6 = 0; + goto __pyx_L0; + + /* "View.MemoryView":567 + * + * @property + * def strides(self): # <<<<<<<<<<<<<< + * if self.view.strides == NULL: + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":575 + * + * @property + * def suboffsets(self): # <<<<<<<<<<<<<< + * if self.view.suboffsets == NULL: + * return (-1,) * self.view.ndim + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + Py_ssize_t __pyx_v_suboffset; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + Py_ssize_t *__pyx_t_4; + Py_ssize_t *__pyx_t_5; + Py_ssize_t *__pyx_t_6; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":576 + * @property + * def suboffsets(self): + * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< + * return (-1,) * self.view.ndim + * + */ + __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":577 + * def suboffsets(self): + * if self.view.suboffsets == NULL: + * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< + * + * return tuple([suboffset for suboffset in 
self.view.suboffsets[:self.view.ndim]]) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__13, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 577, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "View.MemoryView":576 + * @property + * def suboffsets(self): + * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< + * return (-1,) * self.view.ndim + * + */ + } + + /* "View.MemoryView":579 + * return (-1,) * self.view.ndim + * + * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 579, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); + for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { + __pyx_t_4 = __pyx_t_6; + __pyx_v_suboffset = (__pyx_t_4[0]); + __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 579, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } + __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":575 + * + * @property + * def suboffsets(self): # <<<<<<<<<<<<<< + * if self.view.suboffsets == NULL: + * return (-1,) * self.view.ndim + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":582 + * + * @property + * def ndim(self): # <<<<<<<<<<<<<< + * return self.view.ndim + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":583 + * @property + * def ndim(self): + * return self.view.ndim # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 583, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; 
+ __pyx_t_1 = 0; + goto __pyx_L0; + + /* "View.MemoryView":582 + * + * @property + * def ndim(self): # <<<<<<<<<<<<<< + * return self.view.ndim + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":586 + * + * @property + * def itemsize(self): # <<<<<<<<<<<<<< + * return self.view.itemsize + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":587 + * @property + * def itemsize(self): + * return self.view.itemsize # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 587, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "View.MemoryView":586 + * + * @property + * def itemsize(self): # <<<<<<<<<<<<<< + * return self.view.itemsize + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":590 + * + * @property + * def nbytes(self): # <<<<<<<<<<<<<< + * return self.size * self.view.itemsize + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":591 + * @property + * def nbytes(self): + * return self.size * self.view.itemsize # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + 
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 591, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 591, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 591, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "View.MemoryView":590 + * + * @property + * def nbytes(self): # <<<<<<<<<<<<<< + * return self.size * self.view.itemsize + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":594 + * + * @property + * def size(self): # <<<<<<<<<<<<<< + * if self._size is None: + * result = 1 + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_v_result = NULL; + PyObject *__pyx_v_length = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + Py_ssize_t *__pyx_t_3; + Py_ssize_t *__pyx_t_4; + Py_ssize_t *__pyx_t_5; + PyObject *__pyx_t_6 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":595 + * @property + * def size(self): + * if self._size is None: # <<<<<<<<<<<<<< + * result = 1 + * + */ + __pyx_t_1 = (__pyx_v_self->_size == Py_None); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":596 + * def size(self): + * if self._size is None: + * result = 1 # <<<<<<<<<<<<<< + * + * for length in self.view.shape[:self.view.ndim]: + */ + __Pyx_INCREF(__pyx_int_1); + __pyx_v_result = __pyx_int_1; + + /* "View.MemoryView":598 + * result = 1 + * + * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< + * result *= length + * + */ + __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); + for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { + __pyx_t_3 = __pyx_t_5; + __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 598, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); + __pyx_t_6 = 0; + + /* "View.MemoryView":599 + * + * for length in self.view.shape[:self.view.ndim]: + * result *= length # <<<<<<<<<<<<<< + * + * self._size = result + */ + __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 599, 
__pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); + __pyx_t_6 = 0; + } + + /* "View.MemoryView":601 + * result *= length + * + * self._size = result # <<<<<<<<<<<<<< + * + * return self._size + */ + __Pyx_INCREF(__pyx_v_result); + __Pyx_GIVEREF(__pyx_v_result); + __Pyx_GOTREF(__pyx_v_self->_size); + __Pyx_DECREF(__pyx_v_self->_size); + __pyx_v_self->_size = __pyx_v_result; + + /* "View.MemoryView":595 + * @property + * def size(self): + * if self._size is None: # <<<<<<<<<<<<<< + * result = 1 + * + */ + } + + /* "View.MemoryView":603 + * self._size = result + * + * return self._size # <<<<<<<<<<<<<< + * + * def __len__(self): + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_self->_size); + __pyx_r = __pyx_v_self->_size; + goto __pyx_L0; + + /* "View.MemoryView":594 + * + * @property + * def size(self): # <<<<<<<<<<<<<< + * if self._size is None: + * result = 1 + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_6); + __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_result); + __Pyx_XDECREF(__pyx_v_length); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":605 + * return self._size + * + * def __len__(self): # <<<<<<<<<<<<<< + * if self.view.ndim >= 1: + * return self.view.shape[0] + */ + +/* Python wrapper */ +static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ +static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { + Py_ssize_t __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { + Py_ssize_t __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("__len__", 0); + + /* "View.MemoryView":606 + * + * def __len__(self): + * if self.view.ndim >= 1: # <<<<<<<<<<<<<< + * return self.view.shape[0] + * + */ + __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":607 + * def __len__(self): + * if self.view.ndim >= 1: + * return self.view.shape[0] # <<<<<<<<<<<<<< + * + * return 0 + */ + __pyx_r = (__pyx_v_self->view.shape[0]); + goto __pyx_L0; + + /* "View.MemoryView":606 + * + * def __len__(self): + * if self.view.ndim >= 1: # <<<<<<<<<<<<<< + * return self.view.shape[0] + * + */ + } + + /* "View.MemoryView":609 + * return self.view.shape[0] + * + * return 0 # <<<<<<<<<<<<<< + * + * def __repr__(self): + */ + __pyx_r = 0; + goto __pyx_L0; + + /* "View.MemoryView":605 + * return self._size + * + * def __len__(self): # <<<<<<<<<<<<<< + * if self.view.ndim >= 1: + * return self.view.shape[0] + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":611 + * return 0 + * + * def __repr__(self): # <<<<<<<<<<<<<< + * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, + * id(self)) + */ + +/* Python wrapper */ +static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + 
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__repr__", 0); + + /* "View.MemoryView":612 + * + * def __repr__(self): + * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< + * id(self)) + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "View.MemoryView":613 + * def __repr__(self): + * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, + * id(self)) # <<<<<<<<<<<<<< + * + * def __str__(self): + */ + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 613, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + + /* "View.MemoryView":612 + * + * def __repr__(self): + * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< + * id(self)) + * + */ + __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 612, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_2); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); + __pyx_t_1 = 0; + __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":611 + * return 0 + * + * def __repr__(self): # <<<<<<<<<<<<<< + * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, + * id(self)) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":615 + * id(self)) + * + * def __str__(self): # <<<<<<<<<<<<<< + * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) + * + */ + +/* Python wrapper */ +static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ 
+ __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__str__", 0); + + /* "View.MemoryView":616 + * + * def __str__(self): + * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); + __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "View.MemoryView":615 + * id(self)) + * + * def __str__(self): # <<<<<<<<<<<<<< + * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":619 + * + * + * def is_c_contig(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice *mslice + * cdef __Pyx_memviewslice tmp + */ + +/* Python wrapper */ +static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { + __Pyx_memviewslice *__pyx_v_mslice; + __Pyx_memviewslice __pyx_v_tmp; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_memviewslice *__pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("is_c_contig", 0); + + /* "View.MemoryView":622 + * cdef __Pyx_memviewslice *mslice + * cdef __Pyx_memviewslice tmp + * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< + * return slice_is_contig(mslice[0], 'C', self.view.ndim) + * + */ + __pyx_t_1 = 
__pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 622, __pyx_L1_error) + __pyx_v_mslice = __pyx_t_1; + + /* "View.MemoryView":623 + * cdef __Pyx_memviewslice tmp + * mslice = get_slice_from_memview(self, &tmp) + * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< + * + * def is_f_contig(self): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 623, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":619 + * + * + * def is_c_contig(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice *mslice + * cdef __Pyx_memviewslice tmp + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":625 + * return slice_is_contig(mslice[0], 'C', self.view.ndim) + * + * def is_f_contig(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice *mslice + * cdef __Pyx_memviewslice tmp + */ + +/* Python wrapper */ +static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { + __Pyx_memviewslice *__pyx_v_mslice; + __Pyx_memviewslice __pyx_v_tmp; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_memviewslice *__pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("is_f_contig", 0); + + /* "View.MemoryView":628 + * cdef __Pyx_memviewslice *mslice + * cdef __Pyx_memviewslice tmp + * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< + * return slice_is_contig(mslice[0], 'F', self.view.ndim) + * + */ + __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 628, __pyx_L1_error) + __pyx_v_mslice = __pyx_t_1; + + /* "View.MemoryView":629 + * cdef __Pyx_memviewslice tmp + * mslice = get_slice_from_memview(self, &tmp) + * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< + * + * def copy(self): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 629, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":625 + * return slice_is_contig(mslice[0], 'C', self.view.ndim) + * + * def is_f_contig(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice *mslice + * cdef __Pyx_memviewslice tmp + */ + 
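+ /* Note (added commentary, not generated output): is_c_contig() and
+  * is_f_contig() copy only the slice header via get_slice_from_memview()
+  * and then inspect the stride layout; no element data is touched. In
+  * Cython source terms, a buffer declared int[:, ::1] would satisfy
+  * is_c_contig() and one declared int[::1, :] would satisfy is_f_contig()
+  * (illustrative declarations, not part of this file). */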
+ /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":631 + * return slice_is_contig(mslice[0], 'F', self.view.ndim) + * + * def copy(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice mslice + * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS + */ + +/* Python wrapper */ +static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("copy (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { + __Pyx_memviewslice __pyx_v_mslice; + int __pyx_v_flags; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_memviewslice __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("copy", 0); + + /* "View.MemoryView":633 + * def copy(self): + * cdef __Pyx_memviewslice mslice + * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< + * + * slice_copy(self, &mslice) + */ + __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); + + /* "View.MemoryView":635 + * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS + * + * slice_copy(self, &mslice) # <<<<<<<<<<<<<< + * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, + * self.view.itemsize, + */ + __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); + + /* "View.MemoryView":636 + * + * slice_copy(self, &mslice) + * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< + * self.view.itemsize, + * flags|PyBUF_C_CONTIGUOUS, + */ + __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 636, __pyx_L1_error) + __pyx_v_mslice = __pyx_t_1; + + /* "View.MemoryView":641 + * self.dtype_is_object) + * + * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< + * + * def copy_fortran(self): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 641, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":631 + * return slice_is_contig(mslice[0], 'F', self.view.ndim) + * + * def copy(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice mslice + * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":643 + * return 
memoryview_copy_from_slice(self, &mslice) + * + * def copy_fortran(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice src, dst + * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS + */ + +/* Python wrapper */ +static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { + __Pyx_memviewslice __pyx_v_src; + __Pyx_memviewslice __pyx_v_dst; + int __pyx_v_flags; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_memviewslice __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("copy_fortran", 0); + + /* "View.MemoryView":645 + * def copy_fortran(self): + * cdef __Pyx_memviewslice src, dst + * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< + * + * slice_copy(self, &src) + */ + __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); + + /* "View.MemoryView":647 + * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS + * + * slice_copy(self, &src) # <<<<<<<<<<<<<< + * dst = slice_copy_contig(&src, "fortran", self.view.ndim, + * self.view.itemsize, + */ + __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); + + /* "View.MemoryView":648 + * + * slice_copy(self, &src) + * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< + * self.view.itemsize, + * flags|PyBUF_F_CONTIGUOUS, + */ + __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 648, __pyx_L1_error) + __pyx_v_dst = __pyx_t_1; + + /* "View.MemoryView":653 + * self.dtype_is_object) + * + * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 653, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":643 + * return memoryview_copy_from_slice(self, &mslice) + * + * def copy_fortran(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice src, dst + * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject 
*unused); /*proto*/ +static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__reduce_cython__", 0); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 2, __pyx_L1_error) + + /* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ +static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setstate_cython__", 0); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) + 
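+ /* Note (added commentary, not generated output): __reduce_cython__ and
+  * __setstate_cython__ both raise unconditionally, so these memoryview
+  * objects are deliberately unpicklable; their __cinit__ needs a live
+  * buffer, and pickle.dumps() on such a view would be expected to end in
+  * this TypeError. */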
__Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 4, __pyx_L1_error) + + /* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":657 + * + * @cname('__pyx_memoryview_new') + * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< + * cdef memoryview result = memoryview(o, flags, dtype_is_object) + * result.typeinfo = typeinfo + */ + +static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { + struct __pyx_memoryview_obj *__pyx_v_result = 0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); + + /* "View.MemoryView":658 + * @cname('__pyx_memoryview_new') + * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): + * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< + * result.typeinfo = typeinfo + * return result + */ + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 658, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 658, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(__pyx_v_o); + __Pyx_GIVEREF(__pyx_v_o); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_2); + PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); + __pyx_t_1 = 0; + __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); + __pyx_t_2 = 0; + + /* "View.MemoryView":659 + * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): + * cdef memoryview result = memoryview(o, flags, dtype_is_object) + * result.typeinfo = typeinfo # <<<<<<<<<<<<<< + * return result + * + */ + __pyx_v_result->typeinfo = __pyx_v_typeinfo; + + /* "View.MemoryView":660 + * cdef memoryview result = memoryview(o, flags, dtype_is_object) + * result.typeinfo = typeinfo + * return result # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_check') + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_result)); + __pyx_r = ((PyObject *)__pyx_v_result); + goto __pyx_L0; + + /* "View.MemoryView":657 + * + * @cname('__pyx_memoryview_new') + * cdef memoryview_cwrapper(object o, int flags, bint 
dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< + * cdef memoryview result = memoryview(o, flags, dtype_is_object) + * result.typeinfo = typeinfo + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_result); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":663 + * + * @cname('__pyx_memoryview_check') + * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< + * return isinstance(o, memoryview) + * + */ + +static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("memoryview_check", 0); + + /* "View.MemoryView":664 + * @cname('__pyx_memoryview_check') + * cdef inline bint memoryview_check(object o): + * return isinstance(o, memoryview) # <<<<<<<<<<<<<< + * + * cdef tuple _unellipsify(object index, int ndim): + */ + __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); + __pyx_r = __pyx_t_1; + goto __pyx_L0; + + /* "View.MemoryView":663 + * + * @cname('__pyx_memoryview_check') + * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< + * return isinstance(o, memoryview) + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":666 + * return isinstance(o, memoryview) + * + * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< + * """ + * Replace all ellipses with full slices and fill incomplete indices with + */ + +static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { + PyObject *__pyx_v_tup = NULL; + PyObject *__pyx_v_result = NULL; + int __pyx_v_have_slices; + int __pyx_v_seen_ellipsis; + CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; + PyObject *__pyx_v_item = NULL; + Py_ssize_t __pyx_v_nslices; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + Py_ssize_t __pyx_t_5; + PyObject *(*__pyx_t_6)(PyObject *); + PyObject *__pyx_t_7 = NULL; + Py_ssize_t __pyx_t_8; + int __pyx_t_9; + int __pyx_t_10; + PyObject *__pyx_t_11 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("_unellipsify", 0); + + /* "View.MemoryView":671 + * full slices. + * """ + * if not isinstance(index, tuple): # <<<<<<<<<<<<<< + * tup = (index,) + * else: + */ + __pyx_t_1 = PyTuple_Check(__pyx_v_index); + __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":672 + * """ + * if not isinstance(index, tuple): + * tup = (index,) # <<<<<<<<<<<<<< + * else: + * tup = index + */ + __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 672, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(__pyx_v_index); + __Pyx_GIVEREF(__pyx_v_index); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); + __pyx_v_tup = __pyx_t_3; + __pyx_t_3 = 0; + + /* "View.MemoryView":671 + * full slices. 
+ * """ + * if not isinstance(index, tuple): # <<<<<<<<<<<<<< + * tup = (index,) + * else: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":674 + * tup = (index,) + * else: + * tup = index # <<<<<<<<<<<<<< + * + * result = [] + */ + /*else*/ { + __Pyx_INCREF(__pyx_v_index); + __pyx_v_tup = __pyx_v_index; + } + __pyx_L3:; + + /* "View.MemoryView":676 + * tup = index + * + * result = [] # <<<<<<<<<<<<<< + * have_slices = False + * seen_ellipsis = False + */ + __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 676, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_v_result = ((PyObject*)__pyx_t_3); + __pyx_t_3 = 0; + + /* "View.MemoryView":677 + * + * result = [] + * have_slices = False # <<<<<<<<<<<<<< + * seen_ellipsis = False + * for idx, item in enumerate(tup): + */ + __pyx_v_have_slices = 0; + + /* "View.MemoryView":678 + * result = [] + * have_slices = False + * seen_ellipsis = False # <<<<<<<<<<<<<< + * for idx, item in enumerate(tup): + * if item is Ellipsis: + */ + __pyx_v_seen_ellipsis = 0; + + /* "View.MemoryView":679 + * have_slices = False + * seen_ellipsis = False + * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< + * if item is Ellipsis: + * if not seen_ellipsis: + */ + __Pyx_INCREF(__pyx_int_0); + __pyx_t_3 = __pyx_int_0; + if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { + __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; + __pyx_t_6 = NULL; + } else { + __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 679, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 679, __pyx_L1_error) + } + for (;;) { + if (likely(!__pyx_t_6)) { + if (likely(PyList_CheckExact(__pyx_t_4))) { + if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) + #else + __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + #endif + } else { + if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) + #else + __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + #endif + } + } else { + __pyx_t_7 = __pyx_t_6(__pyx_t_4); + if (unlikely(!__pyx_t_7)) { + PyObject* exc_type = PyErr_Occurred(); + if (exc_type) { + if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); + else __PYX_ERR(1, 679, __pyx_L1_error) + } + break; + } + __Pyx_GOTREF(__pyx_t_7); + } + __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); + __pyx_t_7 = 0; + __Pyx_INCREF(__pyx_t_3); + __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); + __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_DECREF(__pyx_t_3); + __pyx_t_3 = __pyx_t_7; + __pyx_t_7 = 0; + + /* "View.MemoryView":680 + * seen_ellipsis = False + * for idx, item in enumerate(tup): + * if item is Ellipsis: # <<<<<<<<<<<<<< + * if not seen_ellipsis: + * 
result.extend([slice(None)] * (ndim - len(tup) + 1)) + */ + __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); + __pyx_t_1 = (__pyx_t_2 != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":681 + * for idx, item in enumerate(tup): + * if item is Ellipsis: + * if not seen_ellipsis: # <<<<<<<<<<<<<< + * result.extend([slice(None)] * (ndim - len(tup) + 1)) + * seen_ellipsis = True + */ + __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":682 + * if item is Ellipsis: + * if not seen_ellipsis: + * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< + * seen_ellipsis = True + * else: + */ + __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 682, __pyx_L1_error) + __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 682, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + { Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { + __Pyx_INCREF(__pyx_slice__16); + __Pyx_GIVEREF(__pyx_slice__16); + PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__16); + } + } + __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 682, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + + /* "View.MemoryView":683 + * if not seen_ellipsis: + * result.extend([slice(None)] * (ndim - len(tup) + 1)) + * seen_ellipsis = True # <<<<<<<<<<<<<< + * else: + * result.append(slice(None)) + */ + __pyx_v_seen_ellipsis = 1; + + /* "View.MemoryView":681 + * for idx, item in enumerate(tup): + * if item is Ellipsis: + * if not seen_ellipsis: # <<<<<<<<<<<<<< + * result.extend([slice(None)] * (ndim - len(tup) + 1)) + * seen_ellipsis = True + */ + goto __pyx_L7; + } + + /* "View.MemoryView":685 + * seen_ellipsis = True + * else: + * result.append(slice(None)) # <<<<<<<<<<<<<< + * have_slices = True + * else: + */ + /*else*/ { + __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__16); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 685, __pyx_L1_error) + } + __pyx_L7:; + + /* "View.MemoryView":686 + * else: + * result.append(slice(None)) + * have_slices = True # <<<<<<<<<<<<<< + * else: + * if not isinstance(item, slice) and not PyIndex_Check(item): + */ + __pyx_v_have_slices = 1; + + /* "View.MemoryView":680 + * seen_ellipsis = False + * for idx, item in enumerate(tup): + * if item is Ellipsis: # <<<<<<<<<<<<<< + * if not seen_ellipsis: + * result.extend([slice(None)] * (ndim - len(tup) + 1)) + */ + goto __pyx_L6; + } + + /* "View.MemoryView":688 + * have_slices = True + * else: + * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< + * raise TypeError("Cannot index with type '%s'" % type(item)) + * + */ + /*else*/ { + __pyx_t_2 = PySlice_Check(__pyx_v_item); + __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); + if (__pyx_t_10) { + } else { + __pyx_t_1 = __pyx_t_10; + goto __pyx_L9_bool_binop_done; + } + __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); + __pyx_t_1 = __pyx_t_10; + __pyx_L9_bool_binop_done:; + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":689 + * else: + * if not isinstance(item, slice) and not PyIndex_Check(item): + * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< + * + * have_slices = have_slices or isinstance(item, slice) + */ + __pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject 
*)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 689, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 689, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_11); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_Raise(__pyx_t_11, 0, 0, 0); + __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; + __PYX_ERR(1, 689, __pyx_L1_error) + + /* "View.MemoryView":688 + * have_slices = True + * else: + * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< + * raise TypeError("Cannot index with type '%s'" % type(item)) + * + */ + } + + /* "View.MemoryView":691 + * raise TypeError("Cannot index with type '%s'" % type(item)) + * + * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< + * result.append(item) + * + */ + __pyx_t_10 = (__pyx_v_have_slices != 0); + if (!__pyx_t_10) { + } else { + __pyx_t_1 = __pyx_t_10; + goto __pyx_L11_bool_binop_done; + } + __pyx_t_10 = PySlice_Check(__pyx_v_item); + __pyx_t_2 = (__pyx_t_10 != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L11_bool_binop_done:; + __pyx_v_have_slices = __pyx_t_1; + + /* "View.MemoryView":692 + * + * have_slices = have_slices or isinstance(item, slice) + * result.append(item) # <<<<<<<<<<<<<< + * + * nslices = ndim - len(result) + */ + __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 692, __pyx_L1_error) + } + __pyx_L6:; + + /* "View.MemoryView":679 + * have_slices = False + * seen_ellipsis = False + * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< + * if item is Ellipsis: + * if not seen_ellipsis: + */ + } + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "View.MemoryView":694 + * result.append(item) + * + * nslices = ndim - len(result) # <<<<<<<<<<<<<< + * if nslices: + * result.extend([slice(None)] * nslices) + */ + __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 694, __pyx_L1_error) + __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); + + /* "View.MemoryView":695 + * + * nslices = ndim - len(result) + * if nslices: # <<<<<<<<<<<<<< + * result.extend([slice(None)] * nslices) + * + */ + __pyx_t_1 = (__pyx_v_nslices != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":696 + * nslices = ndim - len(result) + * if nslices: + * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< + * + * return have_slices or nslices, tuple(result) + */ + __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 696, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + { Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { + __Pyx_INCREF(__pyx_slice__16); + __Pyx_GIVEREF(__pyx_slice__16); + PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__16); + } + } + __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 696, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "View.MemoryView":695 + * + * nslices = ndim - len(result) + * if nslices: # <<<<<<<<<<<<<< + * result.extend([slice(None)] * nslices) + * + */ + } + + /* "View.MemoryView":698 + * result.extend([slice(None)] * nslices) + * + * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< + * + * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): + */ + __Pyx_XDECREF(__pyx_r); + if (!__pyx_v_have_slices) { + } else { + __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = __pyx_t_4; + __pyx_t_4 = 0; + goto __pyx_L14_bool_binop_done; + } + __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = __pyx_t_4; + __pyx_t_4 = 0; + __pyx_L14_bool_binop_done:; + __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 698, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_11); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3); + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4); + __pyx_t_3 = 0; + __pyx_t_4 = 0; + __pyx_r = ((PyObject*)__pyx_t_11); + __pyx_t_11 = 0; + goto __pyx_L0; + + /* "View.MemoryView":666 + * return isinstance(o, memoryview) + * + * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< + * """ + * Replace all ellipses with full slices and fill incomplete indices with + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_11); + __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_tup); + __Pyx_XDECREF(__pyx_v_result); + __Pyx_XDECREF(__pyx_v_idx); + __Pyx_XDECREF(__pyx_v_item); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":700 + * return have_slices or nslices, tuple(result) + * + * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: + */ + +static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { + Py_ssize_t __pyx_v_suboffset; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + Py_ssize_t *__pyx_t_1; + Py_ssize_t *__pyx_t_2; + Py_ssize_t *__pyx_t_3; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); + + /* "View.MemoryView":701 + * + * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): + * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< + * if suboffset >= 0: + * raise ValueError("Indirect dimensions not supported") + */ + 
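+ /* Note (added commentary, not generated output): in PEP 3118 buffer terms
+  * a suboffset >= 0 marks an indirect dimension whose elements are reached
+  * through a pointer dereference; the loop below rejects any such dimension
+  * because the sliced-view code paths assume directly addressable data. */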
__pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); + for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { + __pyx_t_1 = __pyx_t_3; + __pyx_v_suboffset = (__pyx_t_1[0]); + + /* "View.MemoryView":702 + * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: # <<<<<<<<<<<<<< + * raise ValueError("Indirect dimensions not supported") + * + */ + __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); + if (unlikely(__pyx_t_4)) { + + /* "View.MemoryView":703 + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: + * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 703, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_Raise(__pyx_t_5, 0, 0, 0); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __PYX_ERR(1, 703, __pyx_L1_error) + + /* "View.MemoryView":702 + * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: # <<<<<<<<<<<<<< + * raise ValueError("Indirect dimensions not supported") + * + */ + } + } + + /* "View.MemoryView":700 + * return have_slices or nslices, tuple(result) + * + * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":710 + * + * @cname('__pyx_memview_slice') + * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< + * cdef int new_ndim = 0, suboffset_dim = -1, dim + * cdef bint negative_step + */ + +static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { + int __pyx_v_new_ndim; + int __pyx_v_suboffset_dim; + int __pyx_v_dim; + __Pyx_memviewslice __pyx_v_src; + __Pyx_memviewslice __pyx_v_dst; + __Pyx_memviewslice *__pyx_v_p_src; + struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; + __Pyx_memviewslice *__pyx_v_p_dst; + int *__pyx_v_p_suboffset_dim; + Py_ssize_t __pyx_v_start; + Py_ssize_t __pyx_v_stop; + Py_ssize_t __pyx_v_step; + int __pyx_v_have_start; + int __pyx_v_have_stop; + int __pyx_v_have_step; + PyObject *__pyx_v_index = NULL; + struct __pyx_memoryview_obj *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + struct __pyx_memoryview_obj *__pyx_t_4; + char *__pyx_t_5; + int __pyx_t_6; + Py_ssize_t __pyx_t_7; + PyObject *(*__pyx_t_8)(PyObject *); + PyObject *__pyx_t_9 = NULL; + Py_ssize_t __pyx_t_10; + int __pyx_t_11; + Py_ssize_t __pyx_t_12; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("memview_slice", 0); + + /* "View.MemoryView":711 + * @cname('__pyx_memview_slice') + * cdef memoryview memview_slice(memoryview memview, object indices): + * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< + * cdef bint negative_step + * cdef __Pyx_memviewslice src, dst + */ + __pyx_v_new_ndim = 0; + __pyx_v_suboffset_dim = -1; + + /* 
"View.MemoryView":718 + * + * + * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< + * + * cdef _memoryviewslice memviewsliceobj + */ + (void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)))); + + /* "View.MemoryView":722 + * cdef _memoryviewslice memviewsliceobj + * + * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< + * + * if isinstance(memview, _memoryviewslice): + */ + #ifndef CYTHON_WITHOUT_ASSERTIONS + if (unlikely(!Py_OptimizeFlag)) { + if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { + PyErr_SetNone(PyExc_AssertionError); + __PYX_ERR(1, 722, __pyx_L1_error) + } + } + #endif + + /* "View.MemoryView":724 + * assert memview.view.ndim > 0 + * + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * memviewsliceobj = memview + * p_src = &memviewsliceobj.from_slice + */ + __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":725 + * + * if isinstance(memview, _memoryviewslice): + * memviewsliceobj = memview # <<<<<<<<<<<<<< + * p_src = &memviewsliceobj.from_slice + * else: + */ + if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 725, __pyx_L1_error) + __pyx_t_3 = ((PyObject *)__pyx_v_memview); + __Pyx_INCREF(__pyx_t_3); + __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); + __pyx_t_3 = 0; + + /* "View.MemoryView":726 + * if isinstance(memview, _memoryviewslice): + * memviewsliceobj = memview + * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< + * else: + * slice_copy(memview, &src) + */ + __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); + + /* "View.MemoryView":724 + * assert memview.view.ndim > 0 + * + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * memviewsliceobj = memview + * p_src = &memviewsliceobj.from_slice + */ + goto __pyx_L3; + } + + /* "View.MemoryView":728 + * p_src = &memviewsliceobj.from_slice + * else: + * slice_copy(memview, &src) # <<<<<<<<<<<<<< + * p_src = &src + * + */ + /*else*/ { + __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); + + /* "View.MemoryView":729 + * else: + * slice_copy(memview, &src) + * p_src = &src # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_p_src = (&__pyx_v_src); + } + __pyx_L3:; + + /* "View.MemoryView":735 + * + * + * dst.memview = p_src.memview # <<<<<<<<<<<<<< + * dst.data = p_src.data + * + */ + __pyx_t_4 = __pyx_v_p_src->memview; + __pyx_v_dst.memview = __pyx_t_4; + + /* "View.MemoryView":736 + * + * dst.memview = p_src.memview + * dst.data = p_src.data # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_5 = __pyx_v_p_src->data; + __pyx_v_dst.data = __pyx_t_5; + + /* "View.MemoryView":741 + * + * + * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< + * cdef int *p_suboffset_dim = &suboffset_dim + * cdef Py_ssize_t start, stop, step + */ + __pyx_v_p_dst = (&__pyx_v_dst); + + /* "View.MemoryView":742 + * + * cdef __Pyx_memviewslice *p_dst = &dst + * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< + * cdef Py_ssize_t start, stop, step + * cdef bint have_start, have_stop, have_step + */ + __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); + + /* "View.MemoryView":746 + * cdef bint have_start, have_stop, have_step + * + * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< + * if PyIndex_Check(index): + * slice_memviewslice( + */ + __pyx_t_6 = 0; + if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { + 
__pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; + __pyx_t_8 = NULL; + } else { + __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 746, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 746, __pyx_L1_error) + } + for (;;) { + if (likely(!__pyx_t_8)) { + if (likely(PyList_CheckExact(__pyx_t_3))) { + if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) + #else + __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + #endif + } else { + if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) + #else + __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + #endif + } + } else { + __pyx_t_9 = __pyx_t_8(__pyx_t_3); + if (unlikely(!__pyx_t_9)) { + PyObject* exc_type = PyErr_Occurred(); + if (exc_type) { + if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); + else __PYX_ERR(1, 746, __pyx_L1_error) + } + break; + } + __Pyx_GOTREF(__pyx_t_9); + } + __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); + __pyx_t_9 = 0; + __pyx_v_dim = __pyx_t_6; + __pyx_t_6 = (__pyx_t_6 + 1); + + /* "View.MemoryView":747 + * + * for dim, index in enumerate(indices): + * if PyIndex_Check(index): # <<<<<<<<<<<<<< + * slice_memviewslice( + * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], + */ + __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":751 + * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], + * dim, new_ndim, p_suboffset_dim, + * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< + * 0, 0, 0, # have_{start,stop,step} + * False) + */ + __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 751, __pyx_L1_error) + + /* "View.MemoryView":748 + * for dim, index in enumerate(indices): + * if PyIndex_Check(index): + * slice_memviewslice( # <<<<<<<<<<<<<< + * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], + * dim, new_ndim, p_suboffset_dim, + */ + __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 748, __pyx_L1_error) + + /* "View.MemoryView":747 + * + * for dim, index in enumerate(indices): + * if PyIndex_Check(index): # <<<<<<<<<<<<<< + * slice_memviewslice( + * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], + */ + goto __pyx_L6; + } + + /* "View.MemoryView":754 + * 0, 0, 0, # have_{start,stop,step} + * False) + * elif index is None: # <<<<<<<<<<<<<< + * p_dst.shape[new_ndim] = 1 + * p_dst.strides[new_ndim] = 0 + */ + __pyx_t_2 = (__pyx_v_index == Py_None); + __pyx_t_1 = 
(__pyx_t_2 != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":755 + * False) + * elif index is None: + * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< + * p_dst.strides[new_ndim] = 0 + * p_dst.suboffsets[new_ndim] = -1 + */ + (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; + + /* "View.MemoryView":756 + * elif index is None: + * p_dst.shape[new_ndim] = 1 + * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< + * p_dst.suboffsets[new_ndim] = -1 + * new_ndim += 1 + */ + (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; + + /* "View.MemoryView":757 + * p_dst.shape[new_ndim] = 1 + * p_dst.strides[new_ndim] = 0 + * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< + * new_ndim += 1 + * else: + */ + (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; + + /* "View.MemoryView":758 + * p_dst.strides[new_ndim] = 0 + * p_dst.suboffsets[new_ndim] = -1 + * new_ndim += 1 # <<<<<<<<<<<<<< + * else: + * start = index.start or 0 + */ + __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); + + /* "View.MemoryView":754 + * 0, 0, 0, # have_{start,stop,step} + * False) + * elif index is None: # <<<<<<<<<<<<<< + * p_dst.shape[new_ndim] = 1 + * p_dst.strides[new_ndim] = 0 + */ + goto __pyx_L6; + } + + /* "View.MemoryView":760 + * new_ndim += 1 + * else: + * start = index.start or 0 # <<<<<<<<<<<<<< + * stop = index.stop or 0 + * step = index.step or 0 + */ + /*else*/ { + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 760, __pyx_L1_error) + if (!__pyx_t_1) { + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + } else { + __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 760, __pyx_L1_error) + __pyx_t_10 = __pyx_t_12; + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + goto __pyx_L7_bool_binop_done; + } + __pyx_t_10 = 0; + __pyx_L7_bool_binop_done:; + __pyx_v_start = __pyx_t_10; + + /* "View.MemoryView":761 + * else: + * start = index.start or 0 + * stop = index.stop or 0 # <<<<<<<<<<<<<< + * step = index.step or 0 + * + */ + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 761, __pyx_L1_error) + if (!__pyx_t_1) { + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + } else { + __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 761, __pyx_L1_error) + __pyx_t_10 = __pyx_t_12; + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + goto __pyx_L9_bool_binop_done; + } + __pyx_t_10 = 0; + __pyx_L9_bool_binop_done:; + __pyx_v_stop = __pyx_t_10; + + /* "View.MemoryView":762 + * start = index.start or 0 + * stop = index.stop or 0 + * step = index.step or 0 # <<<<<<<<<<<<<< + * + * have_start = index.start is not None + */ + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 762, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 762, __pyx_L1_error) + if (!__pyx_t_1) { + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + } else { + __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 762, __pyx_L1_error) + __pyx_t_10 = __pyx_t_12; + 
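+ /* Note (added commentary, not generated output): "start = index.start or 0"
+  * (and stop/step alike) maps both None and 0 to 0, so it is the separate
+  * have_start/have_stop/have_step flags computed just below that distinguish
+  * an omitted bound from an explicit 0. */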
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + goto __pyx_L11_bool_binop_done; + } + __pyx_t_10 = 0; + __pyx_L11_bool_binop_done:; + __pyx_v_step = __pyx_t_10; + + /* "View.MemoryView":764 + * step = index.step or 0 + * + * have_start = index.start is not None # <<<<<<<<<<<<<< + * have_stop = index.stop is not None + * have_step = index.step is not None + */ + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 764, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_1 = (__pyx_t_9 != Py_None); + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + __pyx_v_have_start = __pyx_t_1; + + /* "View.MemoryView":765 + * + * have_start = index.start is not None + * have_stop = index.stop is not None # <<<<<<<<<<<<<< + * have_step = index.step is not None + * + */ + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 765, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_1 = (__pyx_t_9 != Py_None); + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + __pyx_v_have_stop = __pyx_t_1; + + /* "View.MemoryView":766 + * have_start = index.start is not None + * have_stop = index.stop is not None + * have_step = index.step is not None # <<<<<<<<<<<<<< + * + * slice_memviewslice( + */ + __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 766, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_1 = (__pyx_t_9 != Py_None); + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + __pyx_v_have_step = __pyx_t_1; + + /* "View.MemoryView":768 + * have_step = index.step is not None + * + * slice_memviewslice( # <<<<<<<<<<<<<< + * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], + * dim, new_ndim, p_suboffset_dim, + */ + __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 768, __pyx_L1_error) + + /* "View.MemoryView":774 + * have_start, have_stop, have_step, + * True) + * new_ndim += 1 # <<<<<<<<<<<<<< + * + * if isinstance(memview, _memoryviewslice): + */ + __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); + } + __pyx_L6:; + + /* "View.MemoryView":746 + * cdef bint have_start, have_stop, have_step + * + * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< + * if PyIndex_Check(index): + * slice_memviewslice( + */ + } + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "View.MemoryView":776 + * new_ndim += 1 + * + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * return memoryview_fromslice(dst, new_ndim, + * memviewsliceobj.to_object_func, + */ + __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":777 + * + * if isinstance(memview, _memoryviewslice): + * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< + * memviewsliceobj.to_object_func, + * memviewsliceobj.to_dtype_func, + */ + __Pyx_XDECREF(((PyObject *)__pyx_r)); + + /* "View.MemoryView":778 + * if isinstance(memview, _memoryviewslice): + * return memoryview_fromslice(dst, new_ndim, + * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< + * memviewsliceobj.to_dtype_func, + * memview.dtype_is_object) + */ + if (unlikely(!__pyx_v_memviewsliceobj)) 
{ __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 778, __pyx_L1_error) } + + /* "View.MemoryView":779 + * return memoryview_fromslice(dst, new_ndim, + * memviewsliceobj.to_object_func, + * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< + * memview.dtype_is_object) + * else: + */ + if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 779, __pyx_L1_error) } + + /* "View.MemoryView":777 + * + * if isinstance(memview, _memoryviewslice): + * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< + * memviewsliceobj.to_object_func, + * memviewsliceobj.to_dtype_func, + */ + __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 777, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 777, __pyx_L1_error) + __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "View.MemoryView":776 + * new_ndim += 1 + * + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * return memoryview_fromslice(dst, new_ndim, + * memviewsliceobj.to_object_func, + */ + } + + /* "View.MemoryView":782 + * memview.dtype_is_object) + * else: + * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< + * memview.dtype_is_object) + * + */ + /*else*/ { + __Pyx_XDECREF(((PyObject *)__pyx_r)); + + /* "View.MemoryView":783 + * else: + * return memoryview_fromslice(dst, new_ndim, NULL, NULL, + * memview.dtype_is_object) # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 782, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + + /* "View.MemoryView":782 + * memview.dtype_is_object) + * else: + * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< + * memview.dtype_is_object) + * + */ + if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 782, __pyx_L1_error) + __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); + __pyx_t_3 = 0; + goto __pyx_L0; + } + + /* "View.MemoryView":710 + * + * @cname('__pyx_memview_slice') + * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< + * cdef int new_ndim = 0, suboffset_dim = -1, dim + * cdef bint negative_step + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_9); + __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); + __Pyx_XDECREF(__pyx_v_index); + __Pyx_XGIVEREF((PyObject *)__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":807 + * + * @cname('__pyx_memoryview_slice_memviewslice') + * cdef int slice_memviewslice( # <<<<<<<<<<<<<< + * __Pyx_memviewslice *dst, + * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, + */ + +static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int 
__pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { + Py_ssize_t __pyx_v_new_shape; + int __pyx_v_negative_step; + int __pyx_r; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + + /* "View.MemoryView":827 + * cdef bint negative_step + * + * if not is_slice: # <<<<<<<<<<<<<< + * + * if start < 0: + */ + __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":829 + * if not is_slice: + * + * if start < 0: # <<<<<<<<<<<<<< + * start += shape + * if not 0 <= start < shape: + */ + __pyx_t_1 = ((__pyx_v_start < 0) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":830 + * + * if start < 0: + * start += shape # <<<<<<<<<<<<<< + * if not 0 <= start < shape: + * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) + */ + __pyx_v_start = (__pyx_v_start + __pyx_v_shape); + + /* "View.MemoryView":829 + * if not is_slice: + * + * if start < 0: # <<<<<<<<<<<<<< + * start += shape + * if not 0 <= start < shape: + */ + } + + /* "View.MemoryView":831 + * if start < 0: + * start += shape + * if not 0 <= start < shape: # <<<<<<<<<<<<<< + * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) + * else: + */ + __pyx_t_1 = (0 <= __pyx_v_start); + if (__pyx_t_1) { + __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); + } + __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":832 + * start += shape + * if not 0 <= start < shape: + * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< + * else: + * + */ + __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 832, __pyx_L1_error) + + /* "View.MemoryView":831 + * if start < 0: + * start += shape + * if not 0 <= start < shape: # <<<<<<<<<<<<<< + * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) + * else: + */ + } + + /* "View.MemoryView":827 + * cdef bint negative_step + * + * if not is_slice: # <<<<<<<<<<<<<< + * + * if start < 0: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":835 + * else: + * + * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< + * + * if have_step and step == 0: + */ + /*else*/ { + __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); + if (__pyx_t_1) { + } else { + __pyx_t_2 = __pyx_t_1; + goto __pyx_L6_bool_binop_done; + } + __pyx_t_1 = ((__pyx_v_step < 0) != 0); + __pyx_t_2 = __pyx_t_1; + __pyx_L6_bool_binop_done:; + __pyx_v_negative_step = __pyx_t_2; + + /* "View.MemoryView":837 + * negative_step = have_step != 0 and step < 0 + * + * if have_step and step == 0: # <<<<<<<<<<<<<< + * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) + * + */ + __pyx_t_1 = (__pyx_v_have_step != 0); + if (__pyx_t_1) { + } else { + __pyx_t_2 = __pyx_t_1; + goto __pyx_L9_bool_binop_done; + } + __pyx_t_1 = ((__pyx_v_step == 0) != 0); + __pyx_t_2 = __pyx_t_1; + __pyx_L9_bool_binop_done:; + if (__pyx_t_2) { + + /* "View.MemoryView":838 + * + * if have_step and step == 0: + * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 838, __pyx_L1_error) + + /* "View.MemoryView":837 + * negative_step = have_step != 0 and step < 0 + * + * if have_step and step == 0: # <<<<<<<<<<<<<< + * _err_dim(ValueError, 
"Step may not be zero (axis %d)", dim) + * + */ + } + + /* "View.MemoryView":841 + * + * + * if have_start: # <<<<<<<<<<<<<< + * if start < 0: + * start += shape + */ + __pyx_t_2 = (__pyx_v_have_start != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":842 + * + * if have_start: + * if start < 0: # <<<<<<<<<<<<<< + * start += shape + * if start < 0: + */ + __pyx_t_2 = ((__pyx_v_start < 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":843 + * if have_start: + * if start < 0: + * start += shape # <<<<<<<<<<<<<< + * if start < 0: + * start = 0 + */ + __pyx_v_start = (__pyx_v_start + __pyx_v_shape); + + /* "View.MemoryView":844 + * if start < 0: + * start += shape + * if start < 0: # <<<<<<<<<<<<<< + * start = 0 + * elif start >= shape: + */ + __pyx_t_2 = ((__pyx_v_start < 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":845 + * start += shape + * if start < 0: + * start = 0 # <<<<<<<<<<<<<< + * elif start >= shape: + * if negative_step: + */ + __pyx_v_start = 0; + + /* "View.MemoryView":844 + * if start < 0: + * start += shape + * if start < 0: # <<<<<<<<<<<<<< + * start = 0 + * elif start >= shape: + */ + } + + /* "View.MemoryView":842 + * + * if have_start: + * if start < 0: # <<<<<<<<<<<<<< + * start += shape + * if start < 0: + */ + goto __pyx_L12; + } + + /* "View.MemoryView":846 + * if start < 0: + * start = 0 + * elif start >= shape: # <<<<<<<<<<<<<< + * if negative_step: + * start = shape - 1 + */ + __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":847 + * start = 0 + * elif start >= shape: + * if negative_step: # <<<<<<<<<<<<<< + * start = shape - 1 + * else: + */ + __pyx_t_2 = (__pyx_v_negative_step != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":848 + * elif start >= shape: + * if negative_step: + * start = shape - 1 # <<<<<<<<<<<<<< + * else: + * start = shape + */ + __pyx_v_start = (__pyx_v_shape - 1); + + /* "View.MemoryView":847 + * start = 0 + * elif start >= shape: + * if negative_step: # <<<<<<<<<<<<<< + * start = shape - 1 + * else: + */ + goto __pyx_L14; + } + + /* "View.MemoryView":850 + * start = shape - 1 + * else: + * start = shape # <<<<<<<<<<<<<< + * else: + * if negative_step: + */ + /*else*/ { + __pyx_v_start = __pyx_v_shape; + } + __pyx_L14:; + + /* "View.MemoryView":846 + * if start < 0: + * start = 0 + * elif start >= shape: # <<<<<<<<<<<<<< + * if negative_step: + * start = shape - 1 + */ + } + __pyx_L12:; + + /* "View.MemoryView":841 + * + * + * if have_start: # <<<<<<<<<<<<<< + * if start < 0: + * start += shape + */ + goto __pyx_L11; + } + + /* "View.MemoryView":852 + * start = shape + * else: + * if negative_step: # <<<<<<<<<<<<<< + * start = shape - 1 + * else: + */ + /*else*/ { + __pyx_t_2 = (__pyx_v_negative_step != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":853 + * else: + * if negative_step: + * start = shape - 1 # <<<<<<<<<<<<<< + * else: + * start = 0 + */ + __pyx_v_start = (__pyx_v_shape - 1); + + /* "View.MemoryView":852 + * start = shape + * else: + * if negative_step: # <<<<<<<<<<<<<< + * start = shape - 1 + * else: + */ + goto __pyx_L15; + } + + /* "View.MemoryView":855 + * start = shape - 1 + * else: + * start = 0 # <<<<<<<<<<<<<< + * + * if have_stop: + */ + /*else*/ { + __pyx_v_start = 0; + } + __pyx_L15:; + } + __pyx_L11:; + + /* "View.MemoryView":857 + * start = 0 + * + * if have_stop: # <<<<<<<<<<<<<< + * if stop < 0: + * stop += shape + */ + __pyx_t_2 = (__pyx_v_have_stop != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":858 + * + * if have_stop: + * if stop < 0: # 
<<<<<<<<<<<<<< + * stop += shape + * if stop < 0: + */ + __pyx_t_2 = ((__pyx_v_stop < 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":859 + * if have_stop: + * if stop < 0: + * stop += shape # <<<<<<<<<<<<<< + * if stop < 0: + * stop = 0 + */ + __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); + + /* "View.MemoryView":860 + * if stop < 0: + * stop += shape + * if stop < 0: # <<<<<<<<<<<<<< + * stop = 0 + * elif stop > shape: + */ + __pyx_t_2 = ((__pyx_v_stop < 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":861 + * stop += shape + * if stop < 0: + * stop = 0 # <<<<<<<<<<<<<< + * elif stop > shape: + * stop = shape + */ + __pyx_v_stop = 0; + + /* "View.MemoryView":860 + * if stop < 0: + * stop += shape + * if stop < 0: # <<<<<<<<<<<<<< + * stop = 0 + * elif stop > shape: + */ + } + + /* "View.MemoryView":858 + * + * if have_stop: + * if stop < 0: # <<<<<<<<<<<<<< + * stop += shape + * if stop < 0: + */ + goto __pyx_L17; + } + + /* "View.MemoryView":862 + * if stop < 0: + * stop = 0 + * elif stop > shape: # <<<<<<<<<<<<<< + * stop = shape + * else: + */ + __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":863 + * stop = 0 + * elif stop > shape: + * stop = shape # <<<<<<<<<<<<<< + * else: + * if negative_step: + */ + __pyx_v_stop = __pyx_v_shape; + + /* "View.MemoryView":862 + * if stop < 0: + * stop = 0 + * elif stop > shape: # <<<<<<<<<<<<<< + * stop = shape + * else: + */ + } + __pyx_L17:; + + /* "View.MemoryView":857 + * start = 0 + * + * if have_stop: # <<<<<<<<<<<<<< + * if stop < 0: + * stop += shape + */ + goto __pyx_L16; + } + + /* "View.MemoryView":865 + * stop = shape + * else: + * if negative_step: # <<<<<<<<<<<<<< + * stop = -1 + * else: + */ + /*else*/ { + __pyx_t_2 = (__pyx_v_negative_step != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":866 + * else: + * if negative_step: + * stop = -1 # <<<<<<<<<<<<<< + * else: + * stop = shape + */ + __pyx_v_stop = -1L; + + /* "View.MemoryView":865 + * stop = shape + * else: + * if negative_step: # <<<<<<<<<<<<<< + * stop = -1 + * else: + */ + goto __pyx_L19; + } + + /* "View.MemoryView":868 + * stop = -1 + * else: + * stop = shape # <<<<<<<<<<<<<< + * + * if not have_step: + */ + /*else*/ { + __pyx_v_stop = __pyx_v_shape; + } + __pyx_L19:; + } + __pyx_L16:; + + /* "View.MemoryView":870 + * stop = shape + * + * if not have_step: # <<<<<<<<<<<<<< + * step = 1 + * + */ + __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":871 + * + * if not have_step: + * step = 1 # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_step = 1; + + /* "View.MemoryView":870 + * stop = shape + * + * if not have_step: # <<<<<<<<<<<<<< + * step = 1 + * + */ + } + + /* "View.MemoryView":875 + * + * with cython.cdivision(True): + * new_shape = (stop - start) // step # <<<<<<<<<<<<<< + * + * if (stop - start) - step * new_shape: + */ + __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); + + /* "View.MemoryView":877 + * new_shape = (stop - start) // step + * + * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< + * new_shape += 1 + * + */ + __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":878 + * + * if (stop - start) - step * new_shape: + * new_shape += 1 # <<<<<<<<<<<<<< + * + * if new_shape < 0: + */ + __pyx_v_new_shape = (__pyx_v_new_shape + 1); + + /* "View.MemoryView":877 + * new_shape = (stop - start) // step + * + * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< + * 
new_shape += 1 + * + */ + } + + /* "View.MemoryView":880 + * new_shape += 1 + * + * if new_shape < 0: # <<<<<<<<<<<<<< + * new_shape = 0 + * + */ + __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":881 + * + * if new_shape < 0: + * new_shape = 0 # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_new_shape = 0; + + /* "View.MemoryView":880 + * new_shape += 1 + * + * if new_shape < 0: # <<<<<<<<<<<<<< + * new_shape = 0 + * + */ + } + + /* "View.MemoryView":884 + * + * + * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< + * dst.shape[new_ndim] = new_shape + * dst.suboffsets[new_ndim] = suboffset + */ + (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); + + /* "View.MemoryView":885 + * + * dst.strides[new_ndim] = stride * step + * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< + * dst.suboffsets[new_ndim] = suboffset + * + */ + (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; + + /* "View.MemoryView":886 + * dst.strides[new_ndim] = stride * step + * dst.shape[new_ndim] = new_shape + * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< + * + * + */ + (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; + } + __pyx_L3:; + + /* "View.MemoryView":889 + * + * + * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< + * dst.data += start * stride + * else: + */ + __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":890 + * + * if suboffset_dim[0] < 0: + * dst.data += start * stride # <<<<<<<<<<<<<< + * else: + * dst.suboffsets[suboffset_dim[0]] += start * stride + */ + __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); + + /* "View.MemoryView":889 + * + * + * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< + * dst.data += start * stride + * else: + */ + goto __pyx_L23; + } + + /* "View.MemoryView":892 + * dst.data += start * stride + * else: + * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< + * + * if suboffset >= 0: + */ + /*else*/ { + __pyx_t_3 = (__pyx_v_suboffset_dim[0]); + (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); + } + __pyx_L23:; + + /* "View.MemoryView":894 + * dst.suboffsets[suboffset_dim[0]] += start * stride + * + * if suboffset >= 0: # <<<<<<<<<<<<<< + * if not is_slice: + * if new_ndim == 0: + */ + __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":895 + * + * if suboffset >= 0: + * if not is_slice: # <<<<<<<<<<<<<< + * if new_ndim == 0: + * dst.data = ( dst.data)[0] + suboffset + */ + __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":896 + * if suboffset >= 0: + * if not is_slice: + * if new_ndim == 0: # <<<<<<<<<<<<<< + * dst.data = ( dst.data)[0] + suboffset + * else: + */ + __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":897 + * if not is_slice: + * if new_ndim == 0: + * dst.data = ( dst.data)[0] + suboffset # <<<<<<<<<<<<<< + * else: + * _err_dim(IndexError, "All dimensions preceding dimension %d " + */ + __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); + + /* "View.MemoryView":896 + * if suboffset >= 0: + * if not is_slice: + * if new_ndim == 0: # <<<<<<<<<<<<<< + * dst.data = ( dst.data)[0] + suboffset + * else: + */ + goto __pyx_L26; + } + + /* "View.MemoryView":899 + * dst.data = ( dst.data)[0] + suboffset + * else: + * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< + * 
"must be indexed and not sliced", dim) + * else: + */ + /*else*/ { + + /* "View.MemoryView":900 + * else: + * _err_dim(IndexError, "All dimensions preceding dimension %d " + * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< + * else: + * suboffset_dim[0] = new_ndim + */ + __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 899, __pyx_L1_error) + } + __pyx_L26:; + + /* "View.MemoryView":895 + * + * if suboffset >= 0: + * if not is_slice: # <<<<<<<<<<<<<< + * if new_ndim == 0: + * dst.data = ( dst.data)[0] + suboffset + */ + goto __pyx_L25; + } + + /* "View.MemoryView":902 + * "must be indexed and not sliced", dim) + * else: + * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< + * + * return 0 + */ + /*else*/ { + (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; + } + __pyx_L25:; + + /* "View.MemoryView":894 + * dst.suboffsets[suboffset_dim[0]] += start * stride + * + * if suboffset >= 0: # <<<<<<<<<<<<<< + * if not is_slice: + * if new_ndim == 0: + */ + } + + /* "View.MemoryView":904 + * suboffset_dim[0] = new_ndim + * + * return 0 # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = 0; + goto __pyx_L0; + + /* "View.MemoryView":807 + * + * @cname('__pyx_memoryview_slice_memviewslice') + * cdef int slice_memviewslice( # <<<<<<<<<<<<<< + * __Pyx_memviewslice *dst, + * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, + */ + + /* function exit code */ + __pyx_L1_error:; + { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + } + __pyx_r = -1; + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":910 + * + * @cname('__pyx_pybuffer_index') + * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< + * Py_ssize_t dim) except NULL: + * cdef Py_ssize_t shape, stride, suboffset = -1 + */ + +static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { + Py_ssize_t __pyx_v_shape; + Py_ssize_t __pyx_v_stride; + Py_ssize_t __pyx_v_suboffset; + Py_ssize_t __pyx_v_itemsize; + char *__pyx_v_resultp; + char *__pyx_r; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("pybuffer_index", 0); + + /* "View.MemoryView":912 + * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, + * Py_ssize_t dim) except NULL: + * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< + * cdef Py_ssize_t itemsize = view.itemsize + * cdef char *resultp + */ + __pyx_v_suboffset = -1L; + + /* "View.MemoryView":913 + * Py_ssize_t dim) except NULL: + * cdef Py_ssize_t shape, stride, suboffset = -1 + * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< + * cdef char *resultp + * + */ + __pyx_t_1 = __pyx_v_view->itemsize; + __pyx_v_itemsize = __pyx_t_1; + + /* "View.MemoryView":916 + * cdef char *resultp + * + * if view.ndim == 0: # <<<<<<<<<<<<<< + * shape = view.len / itemsize + * stride = itemsize + */ + __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":917 + * + * if view.ndim == 
0: + * shape = view.len / itemsize # <<<<<<<<<<<<<< + * stride = itemsize + * else: + */ + if (unlikely(__pyx_v_itemsize == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); + __PYX_ERR(1, 917, __pyx_L1_error) + } + else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { + PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); + __PYX_ERR(1, 917, __pyx_L1_error) + } + __pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize); + + /* "View.MemoryView":918 + * if view.ndim == 0: + * shape = view.len / itemsize + * stride = itemsize # <<<<<<<<<<<<<< + * else: + * shape = view.shape[dim] + */ + __pyx_v_stride = __pyx_v_itemsize; + + /* "View.MemoryView":916 + * cdef char *resultp + * + * if view.ndim == 0: # <<<<<<<<<<<<<< + * shape = view.len / itemsize + * stride = itemsize + */ + goto __pyx_L3; + } + + /* "View.MemoryView":920 + * stride = itemsize + * else: + * shape = view.shape[dim] # <<<<<<<<<<<<<< + * stride = view.strides[dim] + * if view.suboffsets != NULL: + */ + /*else*/ { + __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); + + /* "View.MemoryView":921 + * else: + * shape = view.shape[dim] + * stride = view.strides[dim] # <<<<<<<<<<<<<< + * if view.suboffsets != NULL: + * suboffset = view.suboffsets[dim] + */ + __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); + + /* "View.MemoryView":922 + * shape = view.shape[dim] + * stride = view.strides[dim] + * if view.suboffsets != NULL: # <<<<<<<<<<<<<< + * suboffset = view.suboffsets[dim] + * + */ + __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":923 + * stride = view.strides[dim] + * if view.suboffsets != NULL: + * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< + * + * if index < 0: + */ + __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); + + /* "View.MemoryView":922 + * shape = view.shape[dim] + * stride = view.strides[dim] + * if view.suboffsets != NULL: # <<<<<<<<<<<<<< + * suboffset = view.suboffsets[dim] + * + */ + } + } + __pyx_L3:; + + /* "View.MemoryView":925 + * suboffset = view.suboffsets[dim] + * + * if index < 0: # <<<<<<<<<<<<<< + * index += view.shape[dim] + * if index < 0: + */ + __pyx_t_2 = ((__pyx_v_index < 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":926 + * + * if index < 0: + * index += view.shape[dim] # <<<<<<<<<<<<<< + * if index < 0: + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) + */ + __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); + + /* "View.MemoryView":927 + * if index < 0: + * index += view.shape[dim] + * if index < 0: # <<<<<<<<<<<<<< + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) + * + */ + __pyx_t_2 = ((__pyx_v_index < 0) != 0); + if (unlikely(__pyx_t_2)) { + + /* "View.MemoryView":928 + * index += view.shape[dim] + * if index < 0: + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< + * + * if index >= shape: + */ + __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 928, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = 
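+ /* Negative indices wrap exactly once (index += view.shape[dim] above); a
+  * still-negative result, e.g. index = -6 on shape 5, raises IndexError here. */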
__Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 928, __pyx_L1_error) + + /* "View.MemoryView":927 + * if index < 0: + * index += view.shape[dim] + * if index < 0: # <<<<<<<<<<<<<< + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) + * + */ + } + + /* "View.MemoryView":925 + * suboffset = view.suboffsets[dim] + * + * if index < 0: # <<<<<<<<<<<<<< + * index += view.shape[dim] + * if index < 0: + */ + } + + /* "View.MemoryView":930 + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) + * + * if index >= shape: # <<<<<<<<<<<<<< + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) + * + */ + __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); + if (unlikely(__pyx_t_2)) { + + /* "View.MemoryView":931 + * + * if index >= shape: + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< + * + * resultp = bufp + index * stride + */ + __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 931, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 931, __pyx_L1_error) + + /* "View.MemoryView":930 + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) + * + * if index >= shape: # <<<<<<<<<<<<<< + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) + * + */ + } + + /* "View.MemoryView":933 + * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) + * + * resultp = bufp + index * stride # <<<<<<<<<<<<<< + * if suboffset >= 0: + * resultp = ( resultp)[0] + suboffset + */ + __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); + + /* "View.MemoryView":934 + * + * resultp = bufp + index * stride + * if suboffset >= 0: # <<<<<<<<<<<<<< + * resultp = ( resultp)[0] + suboffset + * + */ + __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":935 + * resultp = bufp + index * stride + * if suboffset >= 0: + * resultp = ( resultp)[0] + suboffset # <<<<<<<<<<<<<< + * + * return resultp + */ + __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); + + /* "View.MemoryView":934 + * + * resultp = bufp + index * stride + * if suboffset >= 0: # <<<<<<<<<<<<<< + * resultp = ( resultp)[0] + suboffset + * + */ + } + + /* "View.MemoryView":937 + * resultp = ( resultp)[0] + suboffset + * + * return resultp # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = __pyx_v_resultp; + goto __pyx_L0; + + /* "View.MemoryView":910 + * + * @cname('__pyx_pybuffer_index') + * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< + * Py_ssize_t dim) except NULL: + * cdef Py_ssize_t shape, stride, suboffset = -1 + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + 
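+ /* pybuffer_index (above) resolves one element address as
+  * bufp + index * stride; when the dimension carries a suboffset (>= 0) the
+  * result is dereferenced once more, ((char **)resultp)[0] + suboffset, which
+  * is how PEP 3118 indirect (pointer-chasing) buffers are followed. */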
__Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":943 + * + * @cname('__pyx_memslice_transpose') + * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< + * cdef int ndim = memslice.memview.view.ndim + * + */ + +static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { + int __pyx_v_ndim; + Py_ssize_t *__pyx_v_shape; + Py_ssize_t *__pyx_v_strides; + int __pyx_v_i; + int __pyx_v_j; + int __pyx_r; + int __pyx_t_1; + Py_ssize_t *__pyx_t_2; + long __pyx_t_3; + long __pyx_t_4; + Py_ssize_t __pyx_t_5; + Py_ssize_t __pyx_t_6; + int __pyx_t_7; + int __pyx_t_8; + int __pyx_t_9; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + + /* "View.MemoryView":944 + * @cname('__pyx_memslice_transpose') + * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: + * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< + * + * cdef Py_ssize_t *shape = memslice.shape + */ + __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; + __pyx_v_ndim = __pyx_t_1; + + /* "View.MemoryView":946 + * cdef int ndim = memslice.memview.view.ndim + * + * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< + * cdef Py_ssize_t *strides = memslice.strides + * + */ + __pyx_t_2 = __pyx_v_memslice->shape; + __pyx_v_shape = __pyx_t_2; + + /* "View.MemoryView":947 + * + * cdef Py_ssize_t *shape = memslice.shape + * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_2 = __pyx_v_memslice->strides; + __pyx_v_strides = __pyx_t_2; + + /* "View.MemoryView":951 + * + * cdef int i, j + * for i in range(ndim / 2): # <<<<<<<<<<<<<< + * j = ndim - 1 - i + * strides[i], strides[j] = strides[j], strides[i] + */ + __pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2); + __pyx_t_4 = __pyx_t_3; + for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) { + __pyx_v_i = __pyx_t_1; + + /* "View.MemoryView":952 + * cdef int i, j + * for i in range(ndim / 2): + * j = ndim - 1 - i # <<<<<<<<<<<<<< + * strides[i], strides[j] = strides[j], strides[i] + * shape[i], shape[j] = shape[j], shape[i] + */ + __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); + + /* "View.MemoryView":953 + * for i in range(ndim / 2): + * j = ndim - 1 - i + * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< + * shape[i], shape[j] = shape[j], shape[i] + * + */ + __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]); + __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]); + (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5; + (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6; + + /* "View.MemoryView":954 + * j = ndim - 1 - i + * strides[i], strides[j] = strides[j], strides[i] + * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< + * + * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: + */ + __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]); + __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]); + (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6; + (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5; + + /* "View.MemoryView":956 + * shape[i], shape[j] = shape[j], shape[i] + * + * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< + * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") + * + */ + __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); + if (!__pyx_t_8) { + } else { + __pyx_t_7 = __pyx_t_8; + goto __pyx_L6_bool_binop_done; + } + __pyx_t_8 = 
(((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); + __pyx_t_7 = __pyx_t_8; + __pyx_L6_bool_binop_done:; + if (__pyx_t_7) { + + /* "View.MemoryView":957 + * + * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: + * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< + * + * return 1 + */ + __pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 957, __pyx_L1_error) + + /* "View.MemoryView":956 + * shape[i], shape[j] = shape[j], shape[i] + * + * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< + * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") + * + */ + } + } + + /* "View.MemoryView":959 + * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") + * + * return 1 # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = 1; + goto __pyx_L0; + + /* "View.MemoryView":943 + * + * @cname('__pyx_memslice_transpose') + * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< + * cdef int ndim = memslice.memview.view.ndim + * + */ + + /* function exit code */ + __pyx_L1_error:; + { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + } + __pyx_r = 0; + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":976 + * cdef int (*to_dtype_func)(char *, object) except 0 + * + * def __dealloc__(self): # <<<<<<<<<<<<<< + * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) + * + */ + +/* Python wrapper */ +static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ +static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); + __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__dealloc__", 0); + + /* "View.MemoryView":977 + * + * def __dealloc__(self): + * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< + * + * cdef convert_item_to_object(self, char *itemp): + */ + __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); + + /* "View.MemoryView":976 + * cdef int (*to_dtype_func)(char *, object) except 0 + * + * def __dealloc__(self): # <<<<<<<<<<<<<< + * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) + * + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "View.MemoryView":979 + * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) + * + * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< + * if self.to_object_func != NULL: + * return self.to_object_func(itemp) + */ + +static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + 
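+ /* _memoryviewslice carries optional per-dtype converters: to_object_func
+  * turns a raw item pointer into a Python object, and to_dtype_func writes a
+  * Python object back into the buffer. When either is NULL, the generic
+  * memoryview implementation is used instead, as in the dispatch below. */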
__Pyx_RefNannySetupContext("convert_item_to_object", 0); + + /* "View.MemoryView":980 + * + * cdef convert_item_to_object(self, char *itemp): + * if self.to_object_func != NULL: # <<<<<<<<<<<<<< + * return self.to_object_func(itemp) + * else: + */ + __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":981 + * cdef convert_item_to_object(self, char *itemp): + * if self.to_object_func != NULL: + * return self.to_object_func(itemp) # <<<<<<<<<<<<<< + * else: + * return memoryview.convert_item_to_object(self, itemp) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 981, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":980 + * + * cdef convert_item_to_object(self, char *itemp): + * if self.to_object_func != NULL: # <<<<<<<<<<<<<< + * return self.to_object_func(itemp) + * else: + */ + } + + /* "View.MemoryView":983 + * return self.to_object_func(itemp) + * else: + * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< + * + * cdef assign_item_from_object(self, char *itemp, object value): + */ + /*else*/ { + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 983, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + } + + /* "View.MemoryView":979 + * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) + * + * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< + * if self.to_object_func != NULL: + * return self.to_object_func(itemp) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":985 + * return memoryview.convert_item_to_object(self, itemp) + * + * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< + * if self.to_dtype_func != NULL: + * self.to_dtype_func(itemp, value) + */ + +static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("assign_item_from_object", 0); + + /* "View.MemoryView":986 + * + * cdef assign_item_from_object(self, char *itemp, object value): + * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< + * self.to_dtype_func(itemp, value) + * else: + */ + __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":987 + * cdef assign_item_from_object(self, char *itemp, object value): + * if self.to_dtype_func != NULL: + * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< + * else: + * memoryview.assign_item_from_object(self, itemp, value) + */ + __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 987, __pyx_L1_error) + + /* "View.MemoryView":986 + * + * cdef assign_item_from_object(self, char *itemp, object value): + * if 
self.to_dtype_func != NULL: # <<<<<<<<<<<<<< + * self.to_dtype_func(itemp, value) + * else: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":989 + * self.to_dtype_func(itemp, value) + * else: + * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< + * + * @property + */ + /*else*/ { + __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 989, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __pyx_L3:; + + /* "View.MemoryView":985 + * return memoryview.convert_item_to_object(self, itemp) + * + * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< + * if self.to_dtype_func != NULL: + * self.to_dtype_func(itemp, value) + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":992 + * + * @property + * def base(self): # <<<<<<<<<<<<<< + * return self.from_object + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":993 + * @property + * def base(self): + * return self.from_object # <<<<<<<<<<<<<< + * + * __pyx_getbuffer = capsule( &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_self->from_object); + __pyx_r = __pyx_v_self->from_object; + goto __pyx_L0; + + /* "View.MemoryView":992 + * + * @property + * def base(self): # <<<<<<<<<<<<<< + * return self.from_object + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject 
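+ /* Pickling of _memoryviewslice is deliberately unsupported: because the type
+  * has a non-trivial __cinit__, both __reduce_cython__ and __setstate_cython__
+  * below only raise TypeError. */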
*__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__reduce_cython__", 0); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 2, __pyx_L1_error) + + /* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ +static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setstate_cython__", 0); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 4, __pyx_L1_error) + + /* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + + /* function exit code 
*/ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":999 + * + * @cname('__pyx_memoryview_fromslice') + * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< + * int ndim, + * object (*to_object_func)(char *), + */ + +static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { + struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; + Py_ssize_t __pyx_v_suboffset; + PyObject *__pyx_v_length = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + __Pyx_TypeInfo *__pyx_t_4; + Py_buffer __pyx_t_5; + Py_ssize_t *__pyx_t_6; + Py_ssize_t *__pyx_t_7; + Py_ssize_t *__pyx_t_8; + Py_ssize_t __pyx_t_9; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("memoryview_fromslice", 0); + + /* "View.MemoryView":1007 + * cdef _memoryviewslice result + * + * if memviewslice.memview == Py_None: # <<<<<<<<<<<<<< + * return None + * + */ + __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1008 + * + * if memviewslice.memview == Py_None: + * return None # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + + /* "View.MemoryView":1007 + * cdef _memoryviewslice result + * + * if memviewslice.memview == Py_None: # <<<<<<<<<<<<<< + * return None + * + */ + } + + /* "View.MemoryView":1013 + * + * + * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< + * + * result.from_slice = memviewslice + */ + __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1013, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(Py_None); + PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); + __Pyx_INCREF(__pyx_int_0); + __Pyx_GIVEREF(__pyx_int_0); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); + __Pyx_GIVEREF(__pyx_t_2); + PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); + __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); + __pyx_t_2 = 0; + + /* "View.MemoryView":1015 + * result = _memoryviewslice(None, 0, dtype_is_object) + * + * result.from_slice = memviewslice # <<<<<<<<<<<<<< + * __PYX_INC_MEMVIEW(&memviewslice, 1) + * + */ + __pyx_v_result->from_slice = __pyx_v_memviewslice; + + /* "View.MemoryView":1016 + * + * result.from_slice = memviewslice + * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< + * + * result.from_object = ( memviewslice.memview).base + */ + __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); + + /* "View.MemoryView":1018 + * __PYX_INC_MEMVIEW(&memviewslice, 1) + * + * result.from_object = ( memviewslice.memview).base # <<<<<<<<<<<<<< + * result.typeinfo = 
memviewslice.memview.typeinfo + * + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1018, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GIVEREF(__pyx_t_2); + __Pyx_GOTREF(__pyx_v_result->from_object); + __Pyx_DECREF(__pyx_v_result->from_object); + __pyx_v_result->from_object = __pyx_t_2; + __pyx_t_2 = 0; + + /* "View.MemoryView":1019 + * + * result.from_object = ( memviewslice.memview).base + * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< + * + * result.view = memviewslice.memview.view + */ + __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; + __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; + + /* "View.MemoryView":1021 + * result.typeinfo = memviewslice.memview.typeinfo + * + * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< + * result.view.buf = memviewslice.data + * result.view.ndim = ndim + */ + __pyx_t_5 = __pyx_v_memviewslice.memview->view; + __pyx_v_result->__pyx_base.view = __pyx_t_5; + + /* "View.MemoryView":1022 + * + * result.view = memviewslice.memview.view + * result.view.buf = memviewslice.data # <<<<<<<<<<<<<< + * result.view.ndim = ndim + * (<__pyx_buffer *> &result.view).obj = Py_None + */ + __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); + + /* "View.MemoryView":1023 + * result.view = memviewslice.memview.view + * result.view.buf = memviewslice.data + * result.view.ndim = ndim # <<<<<<<<<<<<<< + * (<__pyx_buffer *> &result.view).obj = Py_None + * Py_INCREF(Py_None) + */ + __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; + + /* "View.MemoryView":1024 + * result.view.buf = memviewslice.data + * result.view.ndim = ndim + * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< + * Py_INCREF(Py_None) + * + */ + ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; + + /* "View.MemoryView":1025 + * result.view.ndim = ndim + * (<__pyx_buffer *> &result.view).obj = Py_None + * Py_INCREF(Py_None) # <<<<<<<<<<<<<< + * + * if (memviewslice.memview).flags & PyBUF_WRITABLE: + */ + Py_INCREF(Py_None); + + /* "View.MemoryView":1027 + * Py_INCREF(Py_None) + * + * if (memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< + * result.flags = PyBUF_RECORDS + * else: + */ + __pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1028 + * + * if (memviewslice.memview).flags & PyBUF_WRITABLE: + * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< + * else: + * result.flags = PyBUF_RECORDS_RO + */ + __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; + + /* "View.MemoryView":1027 + * Py_INCREF(Py_None) + * + * if (memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< + * result.flags = PyBUF_RECORDS + * else: + */ + goto __pyx_L4; + } + + /* "View.MemoryView":1030 + * result.flags = PyBUF_RECORDS + * else: + * result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<< + * + * result.view.shape = result.from_slice.shape + */ + /*else*/ { + __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO; + } + __pyx_L4:; + + /* "View.MemoryView":1032 + * result.flags = PyBUF_RECORDS_RO + * + * result.view.shape = result.from_slice.shape # <<<<<<<<<<<<<< + * result.view.strides = result.from_slice.strides + * + */ + __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); + + /* "View.MemoryView":1033 + * + * result.view.shape = result.from_slice.shape + * result.view.strides = 
result.from_slice.strides # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); + + /* "View.MemoryView":1036 + * + * + * result.view.suboffsets = NULL # <<<<<<<<<<<<<< + * for suboffset in result.from_slice.suboffsets[:ndim]: + * if suboffset >= 0: + */ + __pyx_v_result->__pyx_base.view.suboffsets = NULL; + + /* "View.MemoryView":1037 + * + * result.view.suboffsets = NULL + * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< + * if suboffset >= 0: + * result.view.suboffsets = result.from_slice.suboffsets + */ + __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); + for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { + __pyx_t_6 = __pyx_t_8; + __pyx_v_suboffset = (__pyx_t_6[0]); + + /* "View.MemoryView":1038 + * result.view.suboffsets = NULL + * for suboffset in result.from_slice.suboffsets[:ndim]: + * if suboffset >= 0: # <<<<<<<<<<<<<< + * result.view.suboffsets = result.from_slice.suboffsets + * break + */ + __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1039 + * for suboffset in result.from_slice.suboffsets[:ndim]: + * if suboffset >= 0: + * result.view.suboffsets = result.from_slice.suboffsets # <<<<<<<<<<<<<< + * break + * + */ + __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); + + /* "View.MemoryView":1040 + * if suboffset >= 0: + * result.view.suboffsets = result.from_slice.suboffsets + * break # <<<<<<<<<<<<<< + * + * result.view.len = result.view.itemsize + */ + goto __pyx_L6_break; + + /* "View.MemoryView":1038 + * result.view.suboffsets = NULL + * for suboffset in result.from_slice.suboffsets[:ndim]: + * if suboffset >= 0: # <<<<<<<<<<<<<< + * result.view.suboffsets = result.from_slice.suboffsets + * break + */ + } + } + __pyx_L6_break:; + + /* "View.MemoryView":1042 + * break + * + * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< + * for length in result.view.shape[:ndim]: + * result.view.len *= length + */ + __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; + __pyx_v_result->__pyx_base.view.len = __pyx_t_9; + + /* "View.MemoryView":1043 + * + * result.view.len = result.view.itemsize + * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< + * result.view.len *= length + * + */ + __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); + for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { + __pyx_t_6 = __pyx_t_8; + __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1043, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); + __pyx_t_2 = 0; + + /* "View.MemoryView":1044 + * result.view.len = result.view.itemsize + * for length in result.view.shape[:ndim]: + * result.view.len *= length # <<<<<<<<<<<<<< + * + * result.to_object_func = to_object_func + */ + __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1044, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1044, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1044, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + 
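+ /* view.len is rebuilt as itemsize times the product of the sliced shape;
+  * for example, an 8-byte dtype with shape (2, 3) gives len = 8 * 2 * 3 = 48. */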
__pyx_v_result->__pyx_base.view.len = __pyx_t_9; + } + + /* "View.MemoryView":1046 + * result.view.len *= length + * + * result.to_object_func = to_object_func # <<<<<<<<<<<<<< + * result.to_dtype_func = to_dtype_func + * + */ + __pyx_v_result->to_object_func = __pyx_v_to_object_func; + + /* "View.MemoryView":1047 + * + * result.to_object_func = to_object_func + * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< + * + * return result + */ + __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; + + /* "View.MemoryView":1049 + * result.to_dtype_func = to_dtype_func + * + * return result # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_get_slice_from_memoryview') + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_result)); + __pyx_r = ((PyObject *)__pyx_v_result); + goto __pyx_L0; + + /* "View.MemoryView":999 + * + * @cname('__pyx_memoryview_fromslice') + * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< + * int ndim, + * object (*to_object_func)(char *), + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_result); + __Pyx_XDECREF(__pyx_v_length); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":1052 + * + * @cname('__pyx_memoryview_get_slice_from_memoryview') + * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *mslice) except NULL: + * cdef _memoryviewslice obj + */ + +static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { + struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; + __Pyx_memviewslice *__pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("get_slice_from_memview", 0); + + /* "View.MemoryView":1055 + * __Pyx_memviewslice *mslice) except NULL: + * cdef _memoryviewslice obj + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * obj = memview + * return &obj.from_slice + */ + __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1056 + * cdef _memoryviewslice obj + * if isinstance(memview, _memoryviewslice): + * obj = memview # <<<<<<<<<<<<<< + * return &obj.from_slice + * else: + */ + if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1056, __pyx_L1_error) + __pyx_t_3 = ((PyObject *)__pyx_v_memview); + __Pyx_INCREF(__pyx_t_3); + __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); + __pyx_t_3 = 0; + + /* "View.MemoryView":1057 + * if isinstance(memview, _memoryviewslice): + * obj = memview + * return &obj.from_slice # <<<<<<<<<<<<<< + * else: + * slice_copy(memview, mslice) + */ + __pyx_r = (&__pyx_v_obj->from_slice); + goto __pyx_L0; + + /* "View.MemoryView":1055 + * __Pyx_memviewslice *mslice) except NULL: + * cdef _memoryviewslice obj + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * obj = memview + * return &obj.from_slice + */ + } + + /* "View.MemoryView":1059 + * return 
&obj.from_slice + * else: + * slice_copy(memview, mslice) # <<<<<<<<<<<<<< + * return mslice + * + */ + /*else*/ { + __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); + + /* "View.MemoryView":1060 + * else: + * slice_copy(memview, mslice) + * return mslice # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_slice_copy') + */ + __pyx_r = __pyx_v_mslice; + goto __pyx_L0; + } + + /* "View.MemoryView":1052 + * + * @cname('__pyx_memoryview_get_slice_from_memoryview') + * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *mslice) except NULL: + * cdef _memoryviewslice obj + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_obj); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":1063 + * + * @cname('__pyx_memoryview_slice_copy') + * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< + * cdef int dim + * cdef (Py_ssize_t*) shape, strides, suboffsets + */ + +static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { + int __pyx_v_dim; + Py_ssize_t *__pyx_v_shape; + Py_ssize_t *__pyx_v_strides; + Py_ssize_t *__pyx_v_suboffsets; + __Pyx_RefNannyDeclarations + Py_ssize_t *__pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + Py_ssize_t __pyx_t_5; + __Pyx_RefNannySetupContext("slice_copy", 0); + + /* "View.MemoryView":1067 + * cdef (Py_ssize_t*) shape, strides, suboffsets + * + * shape = memview.view.shape # <<<<<<<<<<<<<< + * strides = memview.view.strides + * suboffsets = memview.view.suboffsets + */ + __pyx_t_1 = __pyx_v_memview->view.shape; + __pyx_v_shape = __pyx_t_1; + + /* "View.MemoryView":1068 + * + * shape = memview.view.shape + * strides = memview.view.strides # <<<<<<<<<<<<<< + * suboffsets = memview.view.suboffsets + * + */ + __pyx_t_1 = __pyx_v_memview->view.strides; + __pyx_v_strides = __pyx_t_1; + + /* "View.MemoryView":1069 + * shape = memview.view.shape + * strides = memview.view.strides + * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< + * + * dst.memview = <__pyx_memoryview *> memview + */ + __pyx_t_1 = __pyx_v_memview->view.suboffsets; + __pyx_v_suboffsets = __pyx_t_1; + + /* "View.MemoryView":1071 + * suboffsets = memview.view.suboffsets + * + * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< + * dst.data = memview.view.buf + * + */ + __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); + + /* "View.MemoryView":1072 + * + * dst.memview = <__pyx_memoryview *> memview + * dst.data = memview.view.buf # <<<<<<<<<<<<<< + * + * for dim in range(memview.view.ndim): + */ + __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); + + /* "View.MemoryView":1074 + * dst.data = memview.view.buf + * + * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< + * dst.shape[dim] = shape[dim] + * dst.strides[dim] = strides[dim] + */ + __pyx_t_2 = __pyx_v_memview->view.ndim; + __pyx_t_3 = __pyx_t_2; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_v_dim = __pyx_t_4; + + /* "View.MemoryView":1075 + * + * for dim in range(memview.view.ndim): + * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< + * dst.strides[dim] = strides[dim] + * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 + */ + (__pyx_v_dst->shape[__pyx_v_dim]) = 
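+ /* slice_copy snapshots the Python-level Py_buffer metadata into the flat
+  * __Pyx_memviewslice struct: per-dimension shape, strides, and suboffsets,
+  * recording -1 when the buffer exposes no suboffsets table (i.e. no
+  * indirection on that dimension). */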
(__pyx_v_shape[__pyx_v_dim]); + + /* "View.MemoryView":1076 + * for dim in range(memview.view.ndim): + * dst.shape[dim] = shape[dim] + * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< + * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 + * + */ + (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); + + /* "View.MemoryView":1077 + * dst.shape[dim] = shape[dim] + * dst.strides[dim] = strides[dim] + * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_copy_object') + */ + if ((__pyx_v_suboffsets != 0)) { + __pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]); + } else { + __pyx_t_5 = -1L; + } + (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5; + } + + /* "View.MemoryView":1063 + * + * @cname('__pyx_memoryview_slice_copy') + * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< + * cdef int dim + * cdef (Py_ssize_t*) shape, strides, suboffsets + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "View.MemoryView":1080 + * + * @cname('__pyx_memoryview_copy_object') + * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< + * "Create a new memoryview object" + * cdef __Pyx_memviewslice memviewslice + */ + +static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { + __Pyx_memviewslice __pyx_v_memviewslice; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("memoryview_copy", 0); + + /* "View.MemoryView":1083 + * "Create a new memoryview object" + * cdef __Pyx_memviewslice memviewslice + * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< + * return memoryview_copy_from_slice(memview, &memviewslice) + * + */ + __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); + + /* "View.MemoryView":1084 + * cdef __Pyx_memviewslice memviewslice + * slice_copy(memview, &memviewslice) + * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_copy_object_from_slice') + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1084, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "View.MemoryView":1080 + * + * @cname('__pyx_memoryview_copy_object') + * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< + * "Create a new memoryview object" + * cdef __Pyx_memviewslice memviewslice + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":1087 + * + * @cname('__pyx_memoryview_copy_object_from_slice') + * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< + * """ + * Create a new memoryview object from a given memoryview object and slice. 
+ */ + +static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { + PyObject *(*__pyx_v_to_object_func)(char *); + int (*__pyx_v_to_dtype_func)(char *, PyObject *); + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *(*__pyx_t_3)(char *); + int (*__pyx_t_4)(char *, PyObject *); + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); + + /* "View.MemoryView":1094 + * cdef int (*to_dtype_func)(char *, object) except 0 + * + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * to_object_func = (<_memoryviewslice> memview).to_object_func + * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func + */ + __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1095 + * + * if isinstance(memview, _memoryviewslice): + * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< + * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func + * else: + */ + __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; + __pyx_v_to_object_func = __pyx_t_3; + + /* "View.MemoryView":1096 + * if isinstance(memview, _memoryviewslice): + * to_object_func = (<_memoryviewslice> memview).to_object_func + * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< + * else: + * to_object_func = NULL + */ + __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; + __pyx_v_to_dtype_func = __pyx_t_4; + + /* "View.MemoryView":1094 + * cdef int (*to_dtype_func)(char *, object) except 0 + * + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * to_object_func = (<_memoryviewslice> memview).to_object_func + * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func + */ + goto __pyx_L3; + } + + /* "View.MemoryView":1098 + * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func + * else: + * to_object_func = NULL # <<<<<<<<<<<<<< + * to_dtype_func = NULL + * + */ + /*else*/ { + __pyx_v_to_object_func = NULL; + + /* "View.MemoryView":1099 + * else: + * to_object_func = NULL + * to_dtype_func = NULL # <<<<<<<<<<<<<< + * + * return memoryview_fromslice(memviewslice[0], memview.view.ndim, + */ + __pyx_v_to_dtype_func = NULL; + } + __pyx_L3:; + + /* "View.MemoryView":1101 + * to_dtype_func = NULL + * + * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< + * to_object_func, to_dtype_func, + * memview.dtype_is_object) + */ + __Pyx_XDECREF(__pyx_r); + + /* "View.MemoryView":1103 + * return memoryview_fromslice(memviewslice[0], memview.view.ndim, + * to_object_func, to_dtype_func, + * memview.dtype_is_object) # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1101, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_r = __pyx_t_5; + __pyx_t_5 = 0; + goto __pyx_L0; + + /* "View.MemoryView":1087 + * + * @cname('__pyx_memoryview_copy_object_from_slice') + * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< + * """ + * Create a new memoryview object from a given memoryview 
object and slice. + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":1109 + * + * + * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< + * if arg < 0: + * return -arg + */ + +static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { + Py_ssize_t __pyx_r; + int __pyx_t_1; + + /* "View.MemoryView":1110 + * + * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: + * if arg < 0: # <<<<<<<<<<<<<< + * return -arg + * else: + */ + __pyx_t_1 = ((__pyx_v_arg < 0) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1111 + * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: + * if arg < 0: + * return -arg # <<<<<<<<<<<<<< + * else: + * return arg + */ + __pyx_r = (-__pyx_v_arg); + goto __pyx_L0; + + /* "View.MemoryView":1110 + * + * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: + * if arg < 0: # <<<<<<<<<<<<<< + * return -arg + * else: + */ + } + + /* "View.MemoryView":1113 + * return -arg + * else: + * return arg # <<<<<<<<<<<<<< + * + * @cname('__pyx_get_best_slice_order') + */ + /*else*/ { + __pyx_r = __pyx_v_arg; + goto __pyx_L0; + } + + /* "View.MemoryView":1109 + * + * + * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< + * if arg < 0: + * return -arg + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":1116 + * + * @cname('__pyx_get_best_slice_order') + * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< + * """ + * Figure out the best memory access order for a given slice. 
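+ *
+ * (Illustrative note, beyond the quoted source.) The scan below takes
+ * c_stride from the innermost dimension with extent > 1 and f_stride
+ * from the outermost, returning 'C' when abs(c_stride) <=
+ * abs(f_stride) and 'F' otherwise. For a hypothetical (3, 4) float64
+ * slice: C-contiguous strides (32, 8) give c_stride=8, f_stride=32
+ * -> 'C'; Fortran strides (8, 24) give c_stride=24, f_stride=8 -> 'F'.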
+ */ + +static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { + int __pyx_v_i; + Py_ssize_t __pyx_v_c_stride; + Py_ssize_t __pyx_v_f_stride; + char __pyx_r; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + + /* "View.MemoryView":1121 + * """ + * cdef int i + * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< + * cdef Py_ssize_t f_stride = 0 + * + */ + __pyx_v_c_stride = 0; + + /* "View.MemoryView":1122 + * cdef int i + * cdef Py_ssize_t c_stride = 0 + * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< + * + * for i in range(ndim - 1, -1, -1): + */ + __pyx_v_f_stride = 0; + + /* "View.MemoryView":1124 + * cdef Py_ssize_t f_stride = 0 + * + * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< + * if mslice.shape[i] > 1: + * c_stride = mslice.strides[i] + */ + for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { + __pyx_v_i = __pyx_t_1; + + /* "View.MemoryView":1125 + * + * for i in range(ndim - 1, -1, -1): + * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< + * c_stride = mslice.strides[i] + * break + */ + __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1126 + * for i in range(ndim - 1, -1, -1): + * if mslice.shape[i] > 1: + * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< + * break + * + */ + __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); + + /* "View.MemoryView":1127 + * if mslice.shape[i] > 1: + * c_stride = mslice.strides[i] + * break # <<<<<<<<<<<<<< + * + * for i in range(ndim): + */ + goto __pyx_L4_break; + + /* "View.MemoryView":1125 + * + * for i in range(ndim - 1, -1, -1): + * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< + * c_stride = mslice.strides[i] + * break + */ + } + } + __pyx_L4_break:; + + /* "View.MemoryView":1129 + * break + * + * for i in range(ndim): # <<<<<<<<<<<<<< + * if mslice.shape[i] > 1: + * f_stride = mslice.strides[i] + */ + __pyx_t_1 = __pyx_v_ndim; + __pyx_t_3 = __pyx_t_1; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_v_i = __pyx_t_4; + + /* "View.MemoryView":1130 + * + * for i in range(ndim): + * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< + * f_stride = mslice.strides[i] + * break + */ + __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1131 + * for i in range(ndim): + * if mslice.shape[i] > 1: + * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< + * break + * + */ + __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); + + /* "View.MemoryView":1132 + * if mslice.shape[i] > 1: + * f_stride = mslice.strides[i] + * break # <<<<<<<<<<<<<< + * + * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): + */ + goto __pyx_L7_break; + + /* "View.MemoryView":1130 + * + * for i in range(ndim): + * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< + * f_stride = mslice.strides[i] + * break + */ + } + } + __pyx_L7_break:; + + /* "View.MemoryView":1134 + * break + * + * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< + * return 'C' + * else: + */ + __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1135 + * + * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): + * return 'C' # <<<<<<<<<<<<<< + * else: + * return 'F' + */ + __pyx_r = 'C'; + goto __pyx_L0; + + /* "View.MemoryView":1134 + * break + * + * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< + * return 'C' + * else: + */ + } + + /* "View.MemoryView":1137 + * return 'C' + * else: + 
* return 'F' # <<<<<<<<<<<<<< + * + * @cython.cdivision(True) + */ + /*else*/ { + __pyx_r = 'F'; + goto __pyx_L0; + } + + /* "View.MemoryView":1116 + * + * @cname('__pyx_get_best_slice_order') + * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< + * """ + * Figure out the best memory access order for a given slice. + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":1140 + * + * @cython.cdivision(True) + * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< + * char *dst_data, Py_ssize_t *dst_strides, + * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, + */ + +static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { + CYTHON_UNUSED Py_ssize_t __pyx_v_i; + CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; + Py_ssize_t __pyx_v_dst_extent; + Py_ssize_t __pyx_v_src_stride; + Py_ssize_t __pyx_v_dst_stride; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + Py_ssize_t __pyx_t_4; + Py_ssize_t __pyx_t_5; + Py_ssize_t __pyx_t_6; + + /* "View.MemoryView":1147 + * + * cdef Py_ssize_t i + * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< + * cdef Py_ssize_t dst_extent = dst_shape[0] + * cdef Py_ssize_t src_stride = src_strides[0] + */ + __pyx_v_src_extent = (__pyx_v_src_shape[0]); + + /* "View.MemoryView":1148 + * cdef Py_ssize_t i + * cdef Py_ssize_t src_extent = src_shape[0] + * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< + * cdef Py_ssize_t src_stride = src_strides[0] + * cdef Py_ssize_t dst_stride = dst_strides[0] + */ + __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); + + /* "View.MemoryView":1149 + * cdef Py_ssize_t src_extent = src_shape[0] + * cdef Py_ssize_t dst_extent = dst_shape[0] + * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< + * cdef Py_ssize_t dst_stride = dst_strides[0] + * + */ + __pyx_v_src_stride = (__pyx_v_src_strides[0]); + + /* "View.MemoryView":1150 + * cdef Py_ssize_t dst_extent = dst_shape[0] + * cdef Py_ssize_t src_stride = src_strides[0] + * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< + * + * if ndim == 1: + */ + __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); + + /* "View.MemoryView":1152 + * cdef Py_ssize_t dst_stride = dst_strides[0] + * + * if ndim == 1: # <<<<<<<<<<<<<< + * if (src_stride > 0 and dst_stride > 0 and + * src_stride == itemsize == dst_stride): + */ + __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1153 + * + * if ndim == 1: + * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< + * src_stride == itemsize == dst_stride): + * memcpy(dst_data, src_data, itemsize * dst_extent) + */ + __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L5_bool_binop_done; + } + __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L5_bool_binop_done; + } + + /* "View.MemoryView":1154 + * if ndim == 1: + * if (src_stride > 0 and dst_stride > 0 and + * src_stride == itemsize == dst_stride): # <<<<<<<<<<<<<< + * memcpy(dst_data, src_data, itemsize * dst_extent) + * else: + */ + __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); + if (__pyx_t_2) { + __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); + } + __pyx_t_3 = (__pyx_t_2 != 
0); + __pyx_t_1 = __pyx_t_3; + __pyx_L5_bool_binop_done:; + + /* "View.MemoryView":1153 + * + * if ndim == 1: + * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< + * src_stride == itemsize == dst_stride): + * memcpy(dst_data, src_data, itemsize * dst_extent) + */ + if (__pyx_t_1) { + + /* "View.MemoryView":1155 + * if (src_stride > 0 and dst_stride > 0 and + * src_stride == itemsize == dst_stride): + * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< + * else: + * for i in range(dst_extent): + */ + (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent))); + + /* "View.MemoryView":1153 + * + * if ndim == 1: + * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< + * src_stride == itemsize == dst_stride): + * memcpy(dst_data, src_data, itemsize * dst_extent) + */ + goto __pyx_L4; + } + + /* "View.MemoryView":1157 + * memcpy(dst_data, src_data, itemsize * dst_extent) + * else: + * for i in range(dst_extent): # <<<<<<<<<<<<<< + * memcpy(dst_data, src_data, itemsize) + * src_data += src_stride + */ + /*else*/ { + __pyx_t_4 = __pyx_v_dst_extent; + __pyx_t_5 = __pyx_t_4; + for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { + __pyx_v_i = __pyx_t_6; + + /* "View.MemoryView":1158 + * else: + * for i in range(dst_extent): + * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< + * src_data += src_stride + * dst_data += dst_stride + */ + (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize)); + + /* "View.MemoryView":1159 + * for i in range(dst_extent): + * memcpy(dst_data, src_data, itemsize) + * src_data += src_stride # <<<<<<<<<<<<<< + * dst_data += dst_stride + * else: + */ + __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); + + /* "View.MemoryView":1160 + * memcpy(dst_data, src_data, itemsize) + * src_data += src_stride + * dst_data += dst_stride # <<<<<<<<<<<<<< + * else: + * for i in range(dst_extent): + */ + __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); + } + } + __pyx_L4:; + + /* "View.MemoryView":1152 + * cdef Py_ssize_t dst_stride = dst_strides[0] + * + * if ndim == 1: # <<<<<<<<<<<<<< + * if (src_stride > 0 and dst_stride > 0 and + * src_stride == itemsize == dst_stride): + */ + goto __pyx_L3; + } + + /* "View.MemoryView":1162 + * dst_data += dst_stride + * else: + * for i in range(dst_extent): # <<<<<<<<<<<<<< + * _copy_strided_to_strided(src_data, src_strides + 1, + * dst_data, dst_strides + 1, + */ + /*else*/ { + __pyx_t_4 = __pyx_v_dst_extent; + __pyx_t_5 = __pyx_t_4; + for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { + __pyx_v_i = __pyx_t_6; + + /* "View.MemoryView":1163 + * else: + * for i in range(dst_extent): + * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< + * dst_data, dst_strides + 1, + * src_shape + 1, dst_shape + 1, + */ + _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); + + /* "View.MemoryView":1167 + * src_shape + 1, dst_shape + 1, + * ndim - 1, itemsize) + * src_data += src_stride # <<<<<<<<<<<<<< + * dst_data += dst_stride + * + */ + __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); + + /* "View.MemoryView":1168 + * ndim - 1, itemsize) + * src_data += src_stride + * dst_data += dst_stride # <<<<<<<<<<<<<< + * + * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, + */ + __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); + } + } + __pyx_L3:; + + 
/* "View.MemoryView":1140 + * + * @cython.cdivision(True) + * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< + * char *dst_data, Py_ssize_t *dst_strides, + * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, + */ + + /* function exit code */ +} + +/* "View.MemoryView":1170 + * dst_data += dst_stride + * + * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *dst, + * int ndim, size_t itemsize) nogil: + */ + +static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { + + /* "View.MemoryView":1173 + * __Pyx_memviewslice *dst, + * int ndim, size_t itemsize) nogil: + * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< + * src.shape, dst.shape, ndim, itemsize) + * + */ + _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); + + /* "View.MemoryView":1170 + * dst_data += dst_stride + * + * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *dst, + * int ndim, size_t itemsize) nogil: + */ + + /* function exit code */ +} + +/* "View.MemoryView":1177 + * + * @cname('__pyx_memoryview_slice_get_size') + * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< + * "Return the size of the memory occupied by the slice in number of bytes" + * cdef Py_ssize_t shape, size = src.memview.view.itemsize + */ + +static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { + Py_ssize_t __pyx_v_shape; + Py_ssize_t __pyx_v_size; + Py_ssize_t __pyx_r; + Py_ssize_t __pyx_t_1; + Py_ssize_t *__pyx_t_2; + Py_ssize_t *__pyx_t_3; + Py_ssize_t *__pyx_t_4; + + /* "View.MemoryView":1179 + * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: + * "Return the size of the memory occupied by the slice in number of bytes" + * cdef Py_ssize_t shape, size = src.memview.view.itemsize # <<<<<<<<<<<<<< + * + * for shape in src.shape[:ndim]: + */ + __pyx_t_1 = __pyx_v_src->memview->view.itemsize; + __pyx_v_size = __pyx_t_1; + + /* "View.MemoryView":1181 + * cdef Py_ssize_t shape, size = src.memview.view.itemsize + * + * for shape in src.shape[:ndim]: # <<<<<<<<<<<<<< + * size *= shape + * + */ + __pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim); + for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { + __pyx_t_2 = __pyx_t_4; + __pyx_v_shape = (__pyx_t_2[0]); + + /* "View.MemoryView":1182 + * + * for shape in src.shape[:ndim]: + * size *= shape # <<<<<<<<<<<<<< + * + * return size + */ + __pyx_v_size = (__pyx_v_size * __pyx_v_shape); + } + + /* "View.MemoryView":1184 + * size *= shape + * + * return size # <<<<<<<<<<<<<< + * + * @cname('__pyx_fill_contig_strides_array') + */ + __pyx_r = __pyx_v_size; + goto __pyx_L0; + + /* "View.MemoryView":1177 + * + * @cname('__pyx_memoryview_slice_get_size') + * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< + * "Return the size of the memory occupied by the slice in number of bytes" + * cdef Py_ssize_t shape, size = src.memview.view.itemsize + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":1187 + * + * @cname('__pyx_fill_contig_strides_array') + * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< + * 
Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, + * int ndim, char order) nogil: + */ + +static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { + int __pyx_v_idx; + Py_ssize_t __pyx_r; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + + /* "View.MemoryView":1196 + * cdef int idx + * + * if order == 'F': # <<<<<<<<<<<<<< + * for idx in range(ndim): + * strides[idx] = stride + */ + __pyx_t_1 = ((__pyx_v_order == 'F') != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1197 + * + * if order == 'F': + * for idx in range(ndim): # <<<<<<<<<<<<<< + * strides[idx] = stride + * stride *= shape[idx] + */ + __pyx_t_2 = __pyx_v_ndim; + __pyx_t_3 = __pyx_t_2; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_v_idx = __pyx_t_4; + + /* "View.MemoryView":1198 + * if order == 'F': + * for idx in range(ndim): + * strides[idx] = stride # <<<<<<<<<<<<<< + * stride *= shape[idx] + * else: + */ + (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; + + /* "View.MemoryView":1199 + * for idx in range(ndim): + * strides[idx] = stride + * stride *= shape[idx] # <<<<<<<<<<<<<< + * else: + * for idx in range(ndim - 1, -1, -1): + */ + __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); + } + + /* "View.MemoryView":1196 + * cdef int idx + * + * if order == 'F': # <<<<<<<<<<<<<< + * for idx in range(ndim): + * strides[idx] = stride + */ + goto __pyx_L3; + } + + /* "View.MemoryView":1201 + * stride *= shape[idx] + * else: + * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< + * strides[idx] = stride + * stride *= shape[idx] + */ + /*else*/ { + for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) { + __pyx_v_idx = __pyx_t_2; + + /* "View.MemoryView":1202 + * else: + * for idx in range(ndim - 1, -1, -1): + * strides[idx] = stride # <<<<<<<<<<<<<< + * stride *= shape[idx] + * + */ + (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; + + /* "View.MemoryView":1203 + * for idx in range(ndim - 1, -1, -1): + * strides[idx] = stride + * stride *= shape[idx] # <<<<<<<<<<<<<< + * + * return stride + */ + __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); + } + } + __pyx_L3:; + + /* "View.MemoryView":1205 + * stride *= shape[idx] + * + * return stride # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_copy_data_to_temp') + */ + __pyx_r = __pyx_v_stride; + goto __pyx_L0; + + /* "View.MemoryView":1187 + * + * @cname('__pyx_fill_contig_strides_array') + * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< + * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, + * int ndim, char order) nogil: + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":1208 + * + * @cname('__pyx_memoryview_copy_data_to_temp') + * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *tmpslice, + * char order, + */ + +static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { + int __pyx_v_i; + void *__pyx_v_result; + size_t __pyx_v_itemsize; + size_t __pyx_v_size; + void *__pyx_r; + Py_ssize_t __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + struct __pyx_memoryview_obj *__pyx_t_4; + int __pyx_t_5; + int __pyx_t_6; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + + /* "View.MemoryView":1219 + * cdef void *result + * + * cdef 
size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< + * cdef size_t size = slice_get_size(src, ndim) + * + */ + __pyx_t_1 = __pyx_v_src->memview->view.itemsize; + __pyx_v_itemsize = __pyx_t_1; + + /* "View.MemoryView":1220 + * + * cdef size_t itemsize = src.memview.view.itemsize + * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< + * + * result = malloc(size) + */ + __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); + + /* "View.MemoryView":1222 + * cdef size_t size = slice_get_size(src, ndim) + * + * result = malloc(size) # <<<<<<<<<<<<<< + * if not result: + * _err(MemoryError, NULL) + */ + __pyx_v_result = malloc(__pyx_v_size); + + /* "View.MemoryView":1223 + * + * result = malloc(size) + * if not result: # <<<<<<<<<<<<<< + * _err(MemoryError, NULL) + * + */ + __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1224 + * result = malloc(size) + * if not result: + * _err(MemoryError, NULL) # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1224, __pyx_L1_error) + + /* "View.MemoryView":1223 + * + * result = malloc(size) + * if not result: # <<<<<<<<<<<<<< + * _err(MemoryError, NULL) + * + */ + } + + /* "View.MemoryView":1227 + * + * + * tmpslice.data = result # <<<<<<<<<<<<<< + * tmpslice.memview = src.memview + * for i in range(ndim): + */ + __pyx_v_tmpslice->data = ((char *)__pyx_v_result); + + /* "View.MemoryView":1228 + * + * tmpslice.data = result + * tmpslice.memview = src.memview # <<<<<<<<<<<<<< + * for i in range(ndim): + * tmpslice.shape[i] = src.shape[i] + */ + __pyx_t_4 = __pyx_v_src->memview; + __pyx_v_tmpslice->memview = __pyx_t_4; + + /* "View.MemoryView":1229 + * tmpslice.data = result + * tmpslice.memview = src.memview + * for i in range(ndim): # <<<<<<<<<<<<<< + * tmpslice.shape[i] = src.shape[i] + * tmpslice.suboffsets[i] = -1 + */ + __pyx_t_3 = __pyx_v_ndim; + __pyx_t_5 = __pyx_t_3; + for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { + __pyx_v_i = __pyx_t_6; + + /* "View.MemoryView":1230 + * tmpslice.memview = src.memview + * for i in range(ndim): + * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< + * tmpslice.suboffsets[i] = -1 + * + */ + (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); + + /* "View.MemoryView":1231 + * for i in range(ndim): + * tmpslice.shape[i] = src.shape[i] + * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< + * + * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, + */ + (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L; + } + + /* "View.MemoryView":1233 + * tmpslice.suboffsets[i] = -1 + * + * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<< + * ndim, order) + * + */ + (void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order)); + + /* "View.MemoryView":1237 + * + * + * for i in range(ndim): # <<<<<<<<<<<<<< + * if tmpslice.shape[i] == 1: + * tmpslice.strides[i] = 0 + */ + __pyx_t_3 = __pyx_v_ndim; + __pyx_t_5 = __pyx_t_3; + for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { + __pyx_v_i = __pyx_t_6; + + /* "View.MemoryView":1238 + * + * for i in range(ndim): + * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< + * tmpslice.strides[i] = 0 + * + */ + __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1239 + * for i 
in range(ndim): + * if tmpslice.shape[i] == 1: + * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< + * + * if slice_is_contig(src[0], order, ndim): + */ + (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; + + /* "View.MemoryView":1238 + * + * for i in range(ndim): + * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< + * tmpslice.strides[i] = 0 + * + */ + } + } + + /* "View.MemoryView":1241 + * tmpslice.strides[i] = 0 + * + * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< + * memcpy(result, src.data, size) + * else: + */ + __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1242 + * + * if slice_is_contig(src[0], order, ndim): + * memcpy(result, src.data, size) # <<<<<<<<<<<<<< + * else: + * copy_strided_to_strided(src, tmpslice, ndim, itemsize) + */ + (void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size)); + + /* "View.MemoryView":1241 + * tmpslice.strides[i] = 0 + * + * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< + * memcpy(result, src.data, size) + * else: + */ + goto __pyx_L9; + } + + /* "View.MemoryView":1244 + * memcpy(result, src.data, size) + * else: + * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< + * + * return result + */ + /*else*/ { + copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); + } + __pyx_L9:; + + /* "View.MemoryView":1246 + * copy_strided_to_strided(src, tmpslice, ndim, itemsize) + * + * return result # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = __pyx_v_result; + goto __pyx_L0; + + /* "View.MemoryView":1208 + * + * @cname('__pyx_memoryview_copy_data_to_temp') + * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *tmpslice, + * char order, + */ + + /* function exit code */ + __pyx_L1_error:; + { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + } + __pyx_r = NULL; + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":1251 + * + * @cname('__pyx_memoryview_err_extents') + * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< + * Py_ssize_t extent2) except -1 with gil: + * raise ValueError("got differing extents in dimension %d (got %d and %d)" % + */ + +static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_RefNannySetupContext("_err_extents", 0); + + /* "View.MemoryView":1254 + * Py_ssize_t extent2) except -1 with gil: + * raise ValueError("got differing extents in dimension %d (got %d and %d)" % + * (i, extent1, extent2)) # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_err_dim') + */ + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1254, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1254, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(1, 1254, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1254, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_2); + PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); + __pyx_t_1 = 0; + __pyx_t_2 = 0; + __pyx_t_3 = 0; + + /* "View.MemoryView":1253 + * cdef int _err_extents(int i, Py_ssize_t extent1, + * Py_ssize_t extent2) except -1 with gil: + * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< + * (i, extent1, extent2)) + * + */ + __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1253, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1253, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 1253, __pyx_L1_error) + + /* "View.MemoryView":1251 + * + * @cname('__pyx_memoryview_err_extents') + * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< + * Py_ssize_t extent2) except -1 with gil: + * raise ValueError("got differing extents in dimension %d (got %d and %d)" % + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __Pyx_RefNannyFinishContext(); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + return __pyx_r; +} + +/* "View.MemoryView":1257 + * + * @cname('__pyx_memoryview_err_dim') + * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< + * raise error(msg.decode('ascii') % dim) + * + */ + +static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_RefNannySetupContext("_err_dim", 0); + __Pyx_INCREF(__pyx_v_error); + + /* "View.MemoryView":1258 + * @cname('__pyx_memoryview_err_dim') + * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: + * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_err') + */ + __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1258, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1258, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1258, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_INCREF(__pyx_v_error); + __pyx_t_3 
= __pyx_v_error; __pyx_t_2 = NULL; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { + __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); + if (likely(__pyx_t_2)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); + __Pyx_INCREF(__pyx_t_2); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_3, function); + } + } + __pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1258, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 1258, __pyx_L1_error) + + /* "View.MemoryView":1257 + * + * @cname('__pyx_memoryview_err_dim') + * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< + * raise error(msg.decode('ascii') % dim) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __Pyx_XDECREF(__pyx_v_error); + __Pyx_RefNannyFinishContext(); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + return __pyx_r; +} + +/* "View.MemoryView":1261 + * + * @cname('__pyx_memoryview_err') + * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< + * if msg != NULL: + * raise error(msg.decode('ascii')) + */ + +static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_RefNannySetupContext("_err", 0); + __Pyx_INCREF(__pyx_v_error); + + /* "View.MemoryView":1262 + * @cname('__pyx_memoryview_err') + * cdef int _err(object error, char *msg) except -1 with gil: + * if msg != NULL: # <<<<<<<<<<<<<< + * raise error(msg.decode('ascii')) + * else: + */ + __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":1263 + * cdef int _err(object error, char *msg) except -1 with gil: + * if msg != NULL: + * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< + * else: + * raise error + */ + __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1263, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(__pyx_v_error); + __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { + __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); + if (likely(__pyx_t_5)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); + __Pyx_INCREF(__pyx_t_5); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_4, function); + } + } + __pyx_t_2 = (__pyx_t_5) ? 
__Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1263, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(1, 1263, __pyx_L1_error) + + /* "View.MemoryView":1262 + * @cname('__pyx_memoryview_err') + * cdef int _err(object error, char *msg) except -1 with gil: + * if msg != NULL: # <<<<<<<<<<<<<< + * raise error(msg.decode('ascii')) + * else: + */ + } + + /* "View.MemoryView":1265 + * raise error(msg.decode('ascii')) + * else: + * raise error # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_copy_contents') + */ + /*else*/ { + __Pyx_Raise(__pyx_v_error, 0, 0, 0); + __PYX_ERR(1, 1265, __pyx_L1_error) + } + + /* "View.MemoryView":1261 + * + * @cname('__pyx_memoryview_err') + * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< + * if msg != NULL: + * raise error(msg.decode('ascii')) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __Pyx_XDECREF(__pyx_v_error); + __Pyx_RefNannyFinishContext(); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + return __pyx_r; +} + +/* "View.MemoryView":1268 + * + * @cname('__pyx_memoryview_copy_contents') + * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice dst, + * int src_ndim, int dst_ndim, + */ + +static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { + void *__pyx_v_tmpdata; + size_t __pyx_v_itemsize; + int __pyx_v_i; + char __pyx_v_order; + int __pyx_v_broadcasting; + int __pyx_v_direct_copy; + __Pyx_memviewslice __pyx_v_tmp; + int __pyx_v_ndim; + int __pyx_r; + Py_ssize_t __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + int __pyx_t_5; + int __pyx_t_6; + void *__pyx_t_7; + int __pyx_t_8; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + + /* "View.MemoryView":1276 + * Check for overlapping memory and verify the shapes. 
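+ *
+ * (Illustrative outline, beyond the quoted source.) The body below:
+ * broadcasts leading dimensions so both slices share ndim; checks
+ * extents, letting a source extent of 1 broadcast via a zero stride
+ * and raising ValueError on any other mismatch; rejects indirect
+ * (suboffset) dimensions; routes overlapping copies through a
+ * temporary buffer; uses a single memcpy when both slices are
+ * contiguous in the same order; and otherwise falls back to an
+ * element-wise strided copy, transposing 'F'/'F' pairs so the copy
+ * runs in 'C' order. A hypothetical user-level trigger for the
+ * broadcast branch:
+ *
+ *     dst[:, :] = src[:1, :]   # (1, n) row broadcast across dst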
+ * """ + * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< + * cdef size_t itemsize = src.memview.view.itemsize + * cdef int i + */ + __pyx_v_tmpdata = NULL; + + /* "View.MemoryView":1277 + * """ + * cdef void *tmpdata = NULL + * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< + * cdef int i + * cdef char order = get_best_order(&src, src_ndim) + */ + __pyx_t_1 = __pyx_v_src.memview->view.itemsize; + __pyx_v_itemsize = __pyx_t_1; + + /* "View.MemoryView":1279 + * cdef size_t itemsize = src.memview.view.itemsize + * cdef int i + * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< + * cdef bint broadcasting = False + * cdef bint direct_copy = False + */ + __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); + + /* "View.MemoryView":1280 + * cdef int i + * cdef char order = get_best_order(&src, src_ndim) + * cdef bint broadcasting = False # <<<<<<<<<<<<<< + * cdef bint direct_copy = False + * cdef __Pyx_memviewslice tmp + */ + __pyx_v_broadcasting = 0; + + /* "View.MemoryView":1281 + * cdef char order = get_best_order(&src, src_ndim) + * cdef bint broadcasting = False + * cdef bint direct_copy = False # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice tmp + * + */ + __pyx_v_direct_copy = 0; + + /* "View.MemoryView":1284 + * cdef __Pyx_memviewslice tmp + * + * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< + * broadcast_leading(&src, src_ndim, dst_ndim) + * elif dst_ndim < src_ndim: + */ + __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1285 + * + * if src_ndim < dst_ndim: + * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< + * elif dst_ndim < src_ndim: + * broadcast_leading(&dst, dst_ndim, src_ndim) + */ + __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); + + /* "View.MemoryView":1284 + * cdef __Pyx_memviewslice tmp + * + * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< + * broadcast_leading(&src, src_ndim, dst_ndim) + * elif dst_ndim < src_ndim: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":1286 + * if src_ndim < dst_ndim: + * broadcast_leading(&src, src_ndim, dst_ndim) + * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< + * broadcast_leading(&dst, dst_ndim, src_ndim) + * + */ + __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1287 + * broadcast_leading(&src, src_ndim, dst_ndim) + * elif dst_ndim < src_ndim: + * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< + * + * cdef int ndim = max(src_ndim, dst_ndim) + */ + __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); + + /* "View.MemoryView":1286 + * if src_ndim < dst_ndim: + * broadcast_leading(&src, src_ndim, dst_ndim) + * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< + * broadcast_leading(&dst, dst_ndim, src_ndim) + * + */ + } + __pyx_L3:; + + /* "View.MemoryView":1289 + * broadcast_leading(&dst, dst_ndim, src_ndim) + * + * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< + * + * for i in range(ndim): + */ + __pyx_t_3 = __pyx_v_dst_ndim; + __pyx_t_4 = __pyx_v_src_ndim; + if (((__pyx_t_3 > __pyx_t_4) != 0)) { + __pyx_t_5 = __pyx_t_3; + } else { + __pyx_t_5 = __pyx_t_4; + } + __pyx_v_ndim = __pyx_t_5; + + /* "View.MemoryView":1291 + * cdef int ndim = max(src_ndim, dst_ndim) + * + * for i in range(ndim): # <<<<<<<<<<<<<< + * if src.shape[i] != dst.shape[i]: + * if src.shape[i] == 1: + */ + __pyx_t_5 = __pyx_v_ndim; + __pyx_t_3 = __pyx_t_5; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + 
__pyx_v_i = __pyx_t_4; + + /* "View.MemoryView":1292 + * + * for i in range(ndim): + * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< + * if src.shape[i] == 1: + * broadcasting = True + */ + __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1293 + * for i in range(ndim): + * if src.shape[i] != dst.shape[i]: + * if src.shape[i] == 1: # <<<<<<<<<<<<<< + * broadcasting = True + * src.strides[i] = 0 + */ + __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1294 + * if src.shape[i] != dst.shape[i]: + * if src.shape[i] == 1: + * broadcasting = True # <<<<<<<<<<<<<< + * src.strides[i] = 0 + * else: + */ + __pyx_v_broadcasting = 1; + + /* "View.MemoryView":1295 + * if src.shape[i] == 1: + * broadcasting = True + * src.strides[i] = 0 # <<<<<<<<<<<<<< + * else: + * _err_extents(i, dst.shape[i], src.shape[i]) + */ + (__pyx_v_src.strides[__pyx_v_i]) = 0; + + /* "View.MemoryView":1293 + * for i in range(ndim): + * if src.shape[i] != dst.shape[i]: + * if src.shape[i] == 1: # <<<<<<<<<<<<<< + * broadcasting = True + * src.strides[i] = 0 + */ + goto __pyx_L7; + } + + /* "View.MemoryView":1297 + * src.strides[i] = 0 + * else: + * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< + * + * if src.suboffsets[i] >= 0: + */ + /*else*/ { + __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error) + } + __pyx_L7:; + + /* "View.MemoryView":1292 + * + * for i in range(ndim): + * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< + * if src.shape[i] == 1: + * broadcasting = True + */ + } + + /* "View.MemoryView":1299 + * _err_extents(i, dst.shape[i], src.shape[i]) + * + * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< + * _err_dim(ValueError, "Dimension %d is not direct", i) + * + */ + __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1300 + * + * if src.suboffsets[i] >= 0: + * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< + * + * if slices_overlap(&src, &dst, ndim, itemsize): + */ + __pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1300, __pyx_L1_error) + + /* "View.MemoryView":1299 + * _err_extents(i, dst.shape[i], src.shape[i]) + * + * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< + * _err_dim(ValueError, "Dimension %d is not direct", i) + * + */ + } + } + + /* "View.MemoryView":1302 + * _err_dim(ValueError, "Dimension %d is not direct", i) + * + * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< + * + * if not slice_is_contig(src, order, ndim): + */ + __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1304 + * if slices_overlap(&src, &dst, ndim, itemsize): + * + * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< + * order = get_best_order(&dst, ndim) + * + */ + __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1305 + * + * if not slice_is_contig(src, order, ndim): + * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< + * + * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) + */ + __pyx_v_order = 
__pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); + + /* "View.MemoryView":1304 + * if slices_overlap(&src, &dst, ndim, itemsize): + * + * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< + * order = get_best_order(&dst, ndim) + * + */ + } + + /* "View.MemoryView":1307 + * order = get_best_order(&dst, ndim) + * + * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< + * src = tmp + * + */ + __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(1, 1307, __pyx_L1_error) + __pyx_v_tmpdata = __pyx_t_7; + + /* "View.MemoryView":1308 + * + * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) + * src = tmp # <<<<<<<<<<<<<< + * + * if not broadcasting: + */ + __pyx_v_src = __pyx_v_tmp; + + /* "View.MemoryView":1302 + * _err_dim(ValueError, "Dimension %d is not direct", i) + * + * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< + * + * if not slice_is_contig(src, order, ndim): + */ + } + + /* "View.MemoryView":1310 + * src = tmp + * + * if not broadcasting: # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1313 + * + * + * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< + * direct_copy = slice_is_contig(dst, 'C', ndim) + * elif slice_is_contig(src, 'F', ndim): + */ + __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1314 + * + * if slice_is_contig(src, 'C', ndim): + * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< + * elif slice_is_contig(src, 'F', ndim): + * direct_copy = slice_is_contig(dst, 'F', ndim) + */ + __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); + + /* "View.MemoryView":1313 + * + * + * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< + * direct_copy = slice_is_contig(dst, 'C', ndim) + * elif slice_is_contig(src, 'F', ndim): + */ + goto __pyx_L12; + } + + /* "View.MemoryView":1315 + * if slice_is_contig(src, 'C', ndim): + * direct_copy = slice_is_contig(dst, 'C', ndim) + * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< + * direct_copy = slice_is_contig(dst, 'F', ndim) + * + */ + __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1316 + * direct_copy = slice_is_contig(dst, 'C', ndim) + * elif slice_is_contig(src, 'F', ndim): + * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< + * + * if direct_copy: + */ + __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); + + /* "View.MemoryView":1315 + * if slice_is_contig(src, 'C', ndim): + * direct_copy = slice_is_contig(dst, 'C', ndim) + * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< + * direct_copy = slice_is_contig(dst, 'F', ndim) + * + */ + } + __pyx_L12:; + + /* "View.MemoryView":1318 + * direct_copy = slice_is_contig(dst, 'F', ndim) + * + * if direct_copy: # <<<<<<<<<<<<<< + * + * refcount_copying(&dst, dtype_is_object, ndim, False) + */ + __pyx_t_2 = (__pyx_v_direct_copy != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1320 + * if direct_copy: + * + * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< + * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) + * refcount_copying(&dst, dtype_is_object, ndim, True) + */ + __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); + + 
/* "View.MemoryView":1321 + * + * refcount_copying(&dst, dtype_is_object, ndim, False) + * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< + * refcount_copying(&dst, dtype_is_object, ndim, True) + * free(tmpdata) + */ + (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim))); + + /* "View.MemoryView":1322 + * refcount_copying(&dst, dtype_is_object, ndim, False) + * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) + * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< + * free(tmpdata) + * return 0 + */ + __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); + + /* "View.MemoryView":1323 + * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) + * refcount_copying(&dst, dtype_is_object, ndim, True) + * free(tmpdata) # <<<<<<<<<<<<<< + * return 0 + * + */ + free(__pyx_v_tmpdata); + + /* "View.MemoryView":1324 + * refcount_copying(&dst, dtype_is_object, ndim, True) + * free(tmpdata) + * return 0 # <<<<<<<<<<<<<< + * + * if order == 'F' == get_best_order(&dst, ndim): + */ + __pyx_r = 0; + goto __pyx_L0; + + /* "View.MemoryView":1318 + * direct_copy = slice_is_contig(dst, 'F', ndim) + * + * if direct_copy: # <<<<<<<<<<<<<< + * + * refcount_copying(&dst, dtype_is_object, ndim, False) + */ + } + + /* "View.MemoryView":1310 + * src = tmp + * + * if not broadcasting: # <<<<<<<<<<<<<< + * + * + */ + } + + /* "View.MemoryView":1326 + * return 0 + * + * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_2 = (__pyx_v_order == 'F'); + if (__pyx_t_2) { + __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); + } + __pyx_t_8 = (__pyx_t_2 != 0); + if (__pyx_t_8) { + + /* "View.MemoryView":1329 + * + * + * transpose_memslice(&src) # <<<<<<<<<<<<<< + * transpose_memslice(&dst) + * + */ + __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1329, __pyx_L1_error) + + /* "View.MemoryView":1330 + * + * transpose_memslice(&src) + * transpose_memslice(&dst) # <<<<<<<<<<<<<< + * + * refcount_copying(&dst, dtype_is_object, ndim, False) + */ + __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1330, __pyx_L1_error) + + /* "View.MemoryView":1326 + * return 0 + * + * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< + * + * + */ + } + + /* "View.MemoryView":1332 + * transpose_memslice(&dst) + * + * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< + * copy_strided_to_strided(&src, &dst, ndim, itemsize) + * refcount_copying(&dst, dtype_is_object, ndim, True) + */ + __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); + + /* "View.MemoryView":1333 + * + * refcount_copying(&dst, dtype_is_object, ndim, False) + * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< + * refcount_copying(&dst, dtype_is_object, ndim, True) + * + */ + copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); + + /* "View.MemoryView":1334 + * refcount_copying(&dst, dtype_is_object, ndim, False) + * copy_strided_to_strided(&src, &dst, ndim, itemsize) + * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< + * + * free(tmpdata) + */ + __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); + + /* "View.MemoryView":1336 + * refcount_copying(&dst, dtype_is_object, ndim, 
True) + * + * free(tmpdata) # <<<<<<<<<<<<<< + * return 0 + * + */ + free(__pyx_v_tmpdata); + + /* "View.MemoryView":1337 + * + * free(tmpdata) + * return 0 # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_broadcast_leading') + */ + __pyx_r = 0; + goto __pyx_L0; + + /* "View.MemoryView":1268 + * + * @cname('__pyx_memoryview_copy_contents') + * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice dst, + * int src_ndim, int dst_ndim, + */ + + /* function exit code */ + __pyx_L1_error:; + { + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + } + __pyx_r = -1; + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":1340 + * + * @cname('__pyx_memoryview_broadcast_leading') + * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< + * int ndim, + * int ndim_other) nogil: + */ + +static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { + int __pyx_v_i; + int __pyx_v_offset; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + + /* "View.MemoryView":1344 + * int ndim_other) nogil: + * cdef int i + * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< + * + * for i in range(ndim - 1, -1, -1): + */ + __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); + + /* "View.MemoryView":1346 + * cdef int offset = ndim_other - ndim + * + * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< + * mslice.shape[i + offset] = mslice.shape[i] + * mslice.strides[i + offset] = mslice.strides[i] + */ + for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { + __pyx_v_i = __pyx_t_1; + + /* "View.MemoryView":1347 + * + * for i in range(ndim - 1, -1, -1): + * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< + * mslice.strides[i + offset] = mslice.strides[i] + * mslice.suboffsets[i + offset] = mslice.suboffsets[i] + */ + (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); + + /* "View.MemoryView":1348 + * for i in range(ndim - 1, -1, -1): + * mslice.shape[i + offset] = mslice.shape[i] + * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< + * mslice.suboffsets[i + offset] = mslice.suboffsets[i] + * + */ + (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); + + /* "View.MemoryView":1349 + * mslice.shape[i + offset] = mslice.shape[i] + * mslice.strides[i + offset] = mslice.strides[i] + * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< + * + * for i in range(offset): + */ + (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); + } + + /* "View.MemoryView":1351 + * mslice.suboffsets[i + offset] = mslice.suboffsets[i] + * + * for i in range(offset): # <<<<<<<<<<<<<< + * mslice.shape[i] = 1 + * mslice.strides[i] = mslice.strides[0] + */ + __pyx_t_1 = __pyx_v_offset; + __pyx_t_2 = __pyx_t_1; + for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { + __pyx_v_i = __pyx_t_3; + + /* "View.MemoryView":1352 + * + * for i in range(offset): + * mslice.shape[i] = 1 # <<<<<<<<<<<<<< + * mslice.strides[i] = mslice.strides[0] + * mslice.suboffsets[i] = -1 + */ + (__pyx_v_mslice->shape[__pyx_v_i]) = 1; + + /* "View.MemoryView":1353 + * for i in range(offset): + * mslice.shape[i] = 1 
+ * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< + * mslice.suboffsets[i] = -1 + * + */ + (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); + + /* "View.MemoryView":1354 + * mslice.shape[i] = 1 + * mslice.strides[i] = mslice.strides[0] + * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< + * + * + */ + (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; + } + + /* "View.MemoryView":1340 + * + * @cname('__pyx_memoryview_broadcast_leading') + * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< + * int ndim, + * int ndim_other) nogil: + */ + + /* function exit code */ +} + +/* "View.MemoryView":1362 + * + * @cname('__pyx_memoryview_refcount_copying') + * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< + * int ndim, bint inc) nogil: + * + */ + +static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { + int __pyx_t_1; + + /* "View.MemoryView":1366 + * + * + * if dtype_is_object: # <<<<<<<<<<<<<< + * refcount_objects_in_slice_with_gil(dst.data, dst.shape, + * dst.strides, ndim, inc) + */ + __pyx_t_1 = (__pyx_v_dtype_is_object != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1367 + * + * if dtype_is_object: + * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<< + * dst.strides, ndim, inc) + * + */ + __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); + + /* "View.MemoryView":1366 + * + * + * if dtype_is_object: # <<<<<<<<<<<<<< + * refcount_objects_in_slice_with_gil(dst.data, dst.shape, + * dst.strides, ndim, inc) + */ + } + + /* "View.MemoryView":1362 + * + * @cname('__pyx_memoryview_refcount_copying') + * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< + * int ndim, bint inc) nogil: + * + */ + + /* function exit code */ +} + +/* "View.MemoryView":1371 + * + * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') + * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< + * Py_ssize_t *strides, int ndim, + * bint inc) with gil: + */ + +static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { + __Pyx_RefNannyDeclarations + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); + + /* "View.MemoryView":1374 + * Py_ssize_t *strides, int ndim, + * bint inc) with gil: + * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_refcount_objects_in_slice') + */ + __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); + + /* "View.MemoryView":1371 + * + * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') + * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< + * Py_ssize_t *strides, int ndim, + * bint inc) with gil: + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif +} + +/* "View.MemoryView":1377 + * + * @cname('__pyx_memoryview_refcount_objects_in_slice') + * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< + * 
Py_ssize_t *strides, int ndim, bint inc): + * cdef Py_ssize_t i + */ + +static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { + CYTHON_UNUSED Py_ssize_t __pyx_v_i; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + Py_ssize_t __pyx_t_2; + Py_ssize_t __pyx_t_3; + int __pyx_t_4; + __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); + + /* "View.MemoryView":1381 + * cdef Py_ssize_t i + * + * for i in range(shape[0]): # <<<<<<<<<<<<<< + * if ndim == 1: + * if inc: + */ + __pyx_t_1 = (__pyx_v_shape[0]); + __pyx_t_2 = __pyx_t_1; + for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { + __pyx_v_i = __pyx_t_3; + + /* "View.MemoryView":1382 + * + * for i in range(shape[0]): + * if ndim == 1: # <<<<<<<<<<<<<< + * if inc: + * Py_INCREF(( data)[0]) + */ + __pyx_t_4 = ((__pyx_v_ndim == 1) != 0); + if (__pyx_t_4) { + + /* "View.MemoryView":1383 + * for i in range(shape[0]): + * if ndim == 1: + * if inc: # <<<<<<<<<<<<<< + * Py_INCREF(( data)[0]) + * else: + */ + __pyx_t_4 = (__pyx_v_inc != 0); + if (__pyx_t_4) { + + /* "View.MemoryView":1384 + * if ndim == 1: + * if inc: + * Py_INCREF(( data)[0]) # <<<<<<<<<<<<<< + * else: + * Py_DECREF(( data)[0]) + */ + Py_INCREF((((PyObject **)__pyx_v_data)[0])); + + /* "View.MemoryView":1383 + * for i in range(shape[0]): + * if ndim == 1: + * if inc: # <<<<<<<<<<<<<< + * Py_INCREF(( data)[0]) + * else: + */ + goto __pyx_L6; + } + + /* "View.MemoryView":1386 + * Py_INCREF(( data)[0]) + * else: + * Py_DECREF(( data)[0]) # <<<<<<<<<<<<<< + * else: + * refcount_objects_in_slice(data, shape + 1, strides + 1, + */ + /*else*/ { + Py_DECREF((((PyObject **)__pyx_v_data)[0])); + } + __pyx_L6:; + + /* "View.MemoryView":1382 + * + * for i in range(shape[0]): + * if ndim == 1: # <<<<<<<<<<<<<< + * if inc: + * Py_INCREF(( data)[0]) + */ + goto __pyx_L5; + } + + /* "View.MemoryView":1388 + * Py_DECREF(( data)[0]) + * else: + * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< + * ndim - 1, inc) + * + */ + /*else*/ { + + /* "View.MemoryView":1389 + * else: + * refcount_objects_in_slice(data, shape + 1, strides + 1, + * ndim - 1, inc) # <<<<<<<<<<<<<< + * + * data += strides[0] + */ + __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); + } + __pyx_L5:; + + /* "View.MemoryView":1391 + * ndim - 1, inc) + * + * data += strides[0] # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); + } + + /* "View.MemoryView":1377 + * + * @cname('__pyx_memoryview_refcount_objects_in_slice') + * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< + * Py_ssize_t *strides, int ndim, bint inc): + * cdef Py_ssize_t i + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "View.MemoryView":1397 + * + * @cname('__pyx_memoryview_slice_assign_scalar') + * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< + * size_t itemsize, void *item, + * bint dtype_is_object) nogil: + */ + +static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { + + /* "View.MemoryView":1400 + * size_t itemsize, void *item, + * bint dtype_is_object) nogil: + * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< + * _slice_assign_scalar(dst.data, dst.shape, 
dst.strides, ndim, + * itemsize, item) + */ + __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); + + /* "View.MemoryView":1401 + * bint dtype_is_object) nogil: + * refcount_copying(dst, dtype_is_object, ndim, False) + * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< + * itemsize, item) + * refcount_copying(dst, dtype_is_object, ndim, True) + */ + __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); + + /* "View.MemoryView":1403 + * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, + * itemsize, item) + * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< + * + * + */ + __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); + + /* "View.MemoryView":1397 + * + * @cname('__pyx_memoryview_slice_assign_scalar') + * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< + * size_t itemsize, void *item, + * bint dtype_is_object) nogil: + */ + + /* function exit code */ +} + +/* "View.MemoryView":1407 + * + * @cname('__pyx_memoryview__slice_assign_scalar') + * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< + * Py_ssize_t *strides, int ndim, + * size_t itemsize, void *item) nogil: + */ + +static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { + CYTHON_UNUSED Py_ssize_t __pyx_v_i; + Py_ssize_t __pyx_v_stride; + Py_ssize_t __pyx_v_extent; + int __pyx_t_1; + Py_ssize_t __pyx_t_2; + Py_ssize_t __pyx_t_3; + Py_ssize_t __pyx_t_4; + + /* "View.MemoryView":1411 + * size_t itemsize, void *item) nogil: + * cdef Py_ssize_t i + * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< + * cdef Py_ssize_t extent = shape[0] + * + */ + __pyx_v_stride = (__pyx_v_strides[0]); + + /* "View.MemoryView":1412 + * cdef Py_ssize_t i + * cdef Py_ssize_t stride = strides[0] + * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< + * + * if ndim == 1: + */ + __pyx_v_extent = (__pyx_v_shape[0]); + + /* "View.MemoryView":1414 + * cdef Py_ssize_t extent = shape[0] + * + * if ndim == 1: # <<<<<<<<<<<<<< + * for i in range(extent): + * memcpy(data, item, itemsize) + */ + __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1415 + * + * if ndim == 1: + * for i in range(extent): # <<<<<<<<<<<<<< + * memcpy(data, item, itemsize) + * data += stride + */ + __pyx_t_2 = __pyx_v_extent; + __pyx_t_3 = __pyx_t_2; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_v_i = __pyx_t_4; + + /* "View.MemoryView":1416 + * if ndim == 1: + * for i in range(extent): + * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< + * data += stride + * else: + */ + (void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize)); + + /* "View.MemoryView":1417 + * for i in range(extent): + * memcpy(data, item, itemsize) + * data += stride # <<<<<<<<<<<<<< + * else: + * for i in range(extent): + */ + __pyx_v_data = (__pyx_v_data + __pyx_v_stride); + } + + /* "View.MemoryView":1414 + * cdef Py_ssize_t extent = shape[0] + * + * if ndim == 1: # <<<<<<<<<<<<<< + * for i in range(extent): + * memcpy(data, item, itemsize) + */ + goto __pyx_L3; + } + + /* "View.MemoryView":1419 + * data += stride + * else: + * for i in range(extent): # <<<<<<<<<<<<<< + * _slice_assign_scalar(data, shape + 1, strides + 1, + * ndim - 1, 
itemsize, item) + */ + /*else*/ { + __pyx_t_2 = __pyx_v_extent; + __pyx_t_3 = __pyx_t_2; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_v_i = __pyx_t_4; + + /* "View.MemoryView":1420 + * else: + * for i in range(extent): + * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< + * ndim - 1, itemsize, item) + * data += stride + */ + __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); + + /* "View.MemoryView":1422 + * _slice_assign_scalar(data, shape + 1, strides + 1, + * ndim - 1, itemsize, item) + * data += stride # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_data = (__pyx_v_data + __pyx_v_stride); + } + } + __pyx_L3:; + + /* "View.MemoryView":1407 + * + * @cname('__pyx_memoryview__slice_assign_scalar') + * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< + * Py_ssize_t *strides, int ndim, + * size_t itemsize, void *item) nogil: + */ + + /* function exit code */ +} + +/* "(tree fragment)":1 + * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< + * cdef object __pyx_PickleError + * cdef object __pyx_result + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v___pyx_type = 0; + long __pyx_v___pyx_checksum; + PyObject *__pyx_v___pyx_state = 0; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0}; + PyObject* values[3] = {0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error) + } + } else if 
(PyTuple_GET_SIZE(__pyx_args) != 3) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + } + __pyx_v___pyx_type = values[0]; + __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) + __pyx_v___pyx_state = values[2]; + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_v___pyx_PickleError = 0; + PyObject *__pyx_v___pyx_result = 0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + int __pyx_t_6; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0); + + /* "(tree fragment)":4 + * cdef object __pyx_PickleError + * cdef object __pyx_result + * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< + * from pickle import PickleError as __pyx_PickleError + * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) + */ + __pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0); + if (__pyx_t_1) { + + /* "(tree fragment)":5 + * cdef object __pyx_result + * if __pyx_checksum != 0xb068931: + * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< + * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) + * __pyx_result = Enum.__new__(__pyx_type) + */ + __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_INCREF(__pyx_n_s_PickleError); + __Pyx_GIVEREF(__pyx_n_s_PickleError); + PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError); + __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_INCREF(__pyx_t_2); + __pyx_v___pyx_PickleError = __pyx_t_2; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "(tree fragment)":6 + * if __pyx_checksum != 0xb068931: + * from pickle import PickleError as __pyx_PickleError + * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<< + * __pyx_result = Enum.__new__(__pyx_type) + * if __pyx_state is not None: + */ + __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_INCREF(__pyx_v___pyx_PickleError); + __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { + __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); + if (likely(__pyx_t_5)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); + __Pyx_INCREF(__pyx_t_5); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_2, function); + } + } + __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 6, __pyx_L1_error) + + /* "(tree fragment)":4 + * cdef object __pyx_PickleError + * cdef object __pyx_result + * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< + * from pickle import PickleError as __pyx_PickleError + * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) + */ + } + + /* "(tree fragment)":7 + * from pickle import PickleError as __pyx_PickleError + * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) + * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<< + * if __pyx_state is not None: + * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { + __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); + if (likely(__pyx_t_4)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); + __Pyx_INCREF(__pyx_t_4); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_2, function); + } + } + __pyx_t_3 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type); + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_v___pyx_result = __pyx_t_3; + __pyx_t_3 = 0; + + /* "(tree fragment)":8 + * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) + * __pyx_result = Enum.__new__(__pyx_type) + * if __pyx_state is not None: # <<<<<<<<<<<<<< + * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) + * return __pyx_result + */ + __pyx_t_1 = (__pyx_v___pyx_state != Py_None); + __pyx_t_6 = (__pyx_t_1 != 0); + if (__pyx_t_6) { + + /* "(tree fragment)":9 + * __pyx_result = Enum.__new__(__pyx_type) + * if __pyx_state is not None: + * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) # <<<<<<<<<<<<<< + * return __pyx_result + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): + */ + if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error) + __pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "(tree fragment)":8 + * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) + * __pyx_result = Enum.__new__(__pyx_type) + * if __pyx_state is not None: # <<<<<<<<<<<<<< + * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) + * return __pyx_result + */ + } + + /* "(tree fragment)":10 + * if __pyx_state is not None: + * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) + * return __pyx_result # <<<<<<<<<<<<<< + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): + * __pyx_result.name = __pyx_state[0] + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v___pyx_result); + __pyx_r = __pyx_v___pyx_result; + goto __pyx_L0; + + /* "(tree fragment)":1 + * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< + * cdef object __pyx_PickleError + * cdef object __pyx_result + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v___pyx_PickleError); + __Pyx_XDECREF(__pyx_v___pyx_result); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":11 + * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) + * return __pyx_result + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< + * __pyx_result.name = __pyx_state[0] + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): + */ + +static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_t_2; + Py_ssize_t __pyx_t_3; + int __pyx_t_4; + int __pyx_t_5; + PyObject 
*__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0); + + /* "(tree fragment)":12 + * return __pyx_result + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): + * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<< + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): + * __pyx_result.__dict__.update(__pyx_state[1]) + */ + if (unlikely(__pyx_v___pyx_state == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); + __PYX_ERR(1, 12, __pyx_L1_error) + } + __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GIVEREF(__pyx_t_1); + __Pyx_GOTREF(__pyx_v___pyx_result->name); + __Pyx_DECREF(__pyx_v___pyx_result->name); + __pyx_v___pyx_result->name = __pyx_t_1; + __pyx_t_1 = 0; + + /* "(tree fragment)":13 + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): + * __pyx_result.name = __pyx_state[0] + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< + * __pyx_result.__dict__.update(__pyx_state[1]) + */ + if (unlikely(__pyx_v___pyx_state == Py_None)) { + PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); + __PYX_ERR(1, 13, __pyx_L1_error) + } + __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error) + __pyx_t_4 = ((__pyx_t_3 > 1) != 0); + if (__pyx_t_4) { + } else { + __pyx_t_2 = __pyx_t_4; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error) + __pyx_t_5 = (__pyx_t_4 != 0); + __pyx_t_2 = __pyx_t_5; + __pyx_L4_bool_binop_done:; + if (__pyx_t_2) { + + /* "(tree fragment)":14 + * __pyx_result.name = __pyx_state[0] + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): + * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<< + */ + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + if (unlikely(__pyx_v___pyx_state == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); + __PYX_ERR(1, 14, __pyx_L1_error) + } + __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_8 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { + __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); + if (likely(__pyx_t_8)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); + __Pyx_INCREF(__pyx_t_8); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_7, function); + } + } + __pyx_t_1 = (__pyx_t_8) ? 
__Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); + __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "(tree fragment)":13 + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): + * __pyx_result.name = __pyx_state[0] + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< + * __pyx_result.__dict__.update(__pyx_state[1]) + */ + } + + /* "(tree fragment)":11 + * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) + * return __pyx_result + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< + * __pyx_result.name = __pyx_state[0] + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} +static struct __pyx_vtabstruct_array __pyx_vtable_array; + +static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { + struct __pyx_array_obj *p; + PyObject *o; + if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { + o = (*t->tp_alloc)(t, 0); + } else { + o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); + } + if (unlikely(!o)) return 0; + p = ((struct __pyx_array_obj *)o); + p->__pyx_vtab = __pyx_vtabptr_array; + p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); + p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); + if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad; + return o; + bad: + Py_DECREF(o); o = 0; + return NULL; +} + +static void __pyx_tp_dealloc_array(PyObject *o) { + struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; + #if CYTHON_USE_TP_FINALIZE + if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { + if (PyObject_CallFinalizerFromDealloc(o)) return; + } + #endif + { + PyObject *etype, *eval, *etb; + PyErr_Fetch(&etype, &eval, &etb); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); + __pyx_array___dealloc__(o); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); + PyErr_Restore(etype, eval, etb); + } + Py_CLEAR(p->mode); + Py_CLEAR(p->_format); + (*Py_TYPE(o)->tp_free)(o); +} +static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { + PyObject *r; + PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; + r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); + Py_DECREF(x); + return r; +} + +static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { + if (v) { + return __pyx_array___setitem__(o, i, v); + } + else { + PyErr_Format(PyExc_NotImplementedError, + "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); + return -1; + } +} + +static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { + PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n); + if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Clear(); + v = __pyx_array___getattr__(o, n); + } + return v; +} + 
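+/* A short orientation note: the getattro slot defined above first tries
+ * the generic attribute lookup and falls back to the array type's own
+ * __getattr__ only when that lookup raises AttributeError.  In Python
+ * terms it behaves roughly like the sketch below (illustrative only,
+ * not part of the generated source):
+ *
+ *     def __getattribute__(self, name):
+ *         try:
+ *             return object.__getattribute__(self, name)
+ *         except AttributeError:
+ *             return self.__getattr__(name)
+ *
+ * What follows is the rest of the Python-level glue for the array type:
+ * the `memview` property getter, the method and getset tables, and the
+ * sequence/mapping/buffer protocol tables that are wired into the
+ * PyTypeObject further below.
+ */
+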
+static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); +} + +static PyMethodDef __pyx_methods_array[] = { + {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, + {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0}, + {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0}, + {0, 0, 0, 0} +}; + +static struct PyGetSetDef __pyx_getsets_array[] = { + {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, + {0, 0, 0, 0, 0} +}; + +static PySequenceMethods __pyx_tp_as_sequence_array = { + __pyx_array___len__, /*sq_length*/ + 0, /*sq_concat*/ + 0, /*sq_repeat*/ + __pyx_sq_item_array, /*sq_item*/ + 0, /*sq_slice*/ + 0, /*sq_ass_item*/ + 0, /*sq_ass_slice*/ + 0, /*sq_contains*/ + 0, /*sq_inplace_concat*/ + 0, /*sq_inplace_repeat*/ +}; + +static PyMappingMethods __pyx_tp_as_mapping_array = { + __pyx_array___len__, /*mp_length*/ + __pyx_array___getitem__, /*mp_subscript*/ + __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ +}; + +static PyBufferProcs __pyx_tp_as_buffer_array = { + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getreadbuffer*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getwritebuffer*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getsegcount*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getcharbuffer*/ + #endif + __pyx_array_getbuffer, /*bf_getbuffer*/ + 0, /*bf_releasebuffer*/ +}; + +static PyTypeObject __pyx_type___pyx_array = { + PyVarObject_HEAD_INIT(0, 0) + "monotonic_align.core.array", /*tp_name*/ + sizeof(struct __pyx_array_obj), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc_array, /*tp_dealloc*/ + #if PY_VERSION_HEX < 0x030800b4 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 + 0, /*tp_vectorcall_offset*/ + #endif + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + #if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ + #endif + #if PY_MAJOR_VERSION >= 3 + 0, /*tp_as_async*/ + #endif + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ + &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + __pyx_tp_getattro_array, /*tp_getattro*/ + 0, /*tp_setattro*/ + &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ + 0, /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_methods_array, /*tp_methods*/ + 0, /*tp_members*/ + __pyx_getsets_array, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new_array, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ + #if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ + #endif + #if PY_VERSION_HEX >= 0x030800b1 + 0, /*tp_vectorcall*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 + 0, /*tp_print*/ + #endif +}; + +static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { + struct __pyx_MemviewEnum_obj *p; + PyObject *o; + if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { + o = (*t->tp_alloc)(t, 0); + } else { + o 
= (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); + } + if (unlikely(!o)) return 0; + p = ((struct __pyx_MemviewEnum_obj *)o); + p->name = Py_None; Py_INCREF(Py_None); + return o; +} + +static void __pyx_tp_dealloc_Enum(PyObject *o) { + struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; + #if CYTHON_USE_TP_FINALIZE + if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { + if (PyObject_CallFinalizerFromDealloc(o)) return; + } + #endif + PyObject_GC_UnTrack(o); + Py_CLEAR(p->name); + (*Py_TYPE(o)->tp_free)(o); +} + +static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { + int e; + struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; + if (p->name) { + e = (*v)(p->name, a); if (e) return e; + } + return 0; +} + +static int __pyx_tp_clear_Enum(PyObject *o) { + PyObject* tmp; + struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; + tmp = ((PyObject*)p->name); + p->name = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + return 0; +} + +static PyMethodDef __pyx_methods_Enum[] = { + {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0}, + {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0}, + {0, 0, 0, 0} +}; + +static PyTypeObject __pyx_type___pyx_MemviewEnum = { + PyVarObject_HEAD_INIT(0, 0) + "monotonic_align.core.Enum", /*tp_name*/ + sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc_Enum, /*tp_dealloc*/ + #if PY_VERSION_HEX < 0x030800b4 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 + 0, /*tp_vectorcall_offset*/ + #endif + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + #if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ + #endif + #if PY_MAJOR_VERSION >= 3 + 0, /*tp_as_async*/ + #endif + __pyx_MemviewEnum___repr__, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + 0, /*tp_doc*/ + __pyx_tp_traverse_Enum, /*tp_traverse*/ + __pyx_tp_clear_Enum, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_methods_Enum, /*tp_methods*/ + 0, /*tp_members*/ + 0, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + __pyx_MemviewEnum___init__, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new_Enum, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ + #if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ + #endif + #if PY_VERSION_HEX >= 0x030800b1 + 0, /*tp_vectorcall*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 + 0, /*tp_print*/ + #endif +}; +static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; + +static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { + struct __pyx_memoryview_obj *p; + PyObject *o; + if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { + o = (*t->tp_alloc)(t, 0); + } else { + o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); + } + if (unlikely(!o)) return 0; + p = ((struct 
__pyx_memoryview_obj *)o); + p->__pyx_vtab = __pyx_vtabptr_memoryview; + p->obj = Py_None; Py_INCREF(Py_None); + p->_size = Py_None; Py_INCREF(Py_None); + p->_array_interface = Py_None; Py_INCREF(Py_None); + p->view.obj = NULL; + if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; + return o; + bad: + Py_DECREF(o); o = 0; + return NULL; +} + +static void __pyx_tp_dealloc_memoryview(PyObject *o) { + struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; + #if CYTHON_USE_TP_FINALIZE + if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { + if (PyObject_CallFinalizerFromDealloc(o)) return; + } + #endif + PyObject_GC_UnTrack(o); + { + PyObject *etype, *eval, *etb; + PyErr_Fetch(&etype, &eval, &etb); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); + __pyx_memoryview___dealloc__(o); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); + PyErr_Restore(etype, eval, etb); + } + Py_CLEAR(p->obj); + Py_CLEAR(p->_size); + Py_CLEAR(p->_array_interface); + (*Py_TYPE(o)->tp_free)(o); +} + +static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { + int e; + struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; + if (p->obj) { + e = (*v)(p->obj, a); if (e) return e; + } + if (p->_size) { + e = (*v)(p->_size, a); if (e) return e; + } + if (p->_array_interface) { + e = (*v)(p->_array_interface, a); if (e) return e; + } + if (p->view.obj) { + e = (*v)(p->view.obj, a); if (e) return e; + } + return 0; +} + +static int __pyx_tp_clear_memoryview(PyObject *o) { + PyObject* tmp; + struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; + tmp = ((PyObject*)p->obj); + p->obj = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_size); + p->_size = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_array_interface); + p->_array_interface = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + Py_CLEAR(p->view.obj); + return 0; +} +static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { + PyObject *r; + PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; + r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); + Py_DECREF(x); + return r; +} + +static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { + if (v) { + return __pyx_memoryview___setitem__(o, i, v); + } + else { + PyErr_Format(PyExc_NotImplementedError, + "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); + return -1; + } +} + +static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); +} + +static 
PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); +} + +static PyMethodDef __pyx_methods_memoryview[] = { + {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, + {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, + {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, + {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, + {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0}, + {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0}, + {0, 0, 0, 0} +}; + +static struct PyGetSetDef __pyx_getsets_memoryview[] = { + {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, + {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, + {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, + {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, + {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, + {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, + {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, + {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, + {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, + {0, 0, 0, 0, 0} +}; + +static PySequenceMethods __pyx_tp_as_sequence_memoryview = { + __pyx_memoryview___len__, /*sq_length*/ + 0, /*sq_concat*/ + 0, /*sq_repeat*/ + __pyx_sq_item_memoryview, /*sq_item*/ + 0, /*sq_slice*/ + 0, /*sq_ass_item*/ + 0, /*sq_ass_slice*/ + 0, /*sq_contains*/ + 0, /*sq_inplace_concat*/ + 0, /*sq_inplace_repeat*/ +}; + +static PyMappingMethods __pyx_tp_as_mapping_memoryview = { + __pyx_memoryview___len__, /*mp_length*/ + __pyx_memoryview___getitem__, /*mp_subscript*/ + __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ +}; + +static PyBufferProcs __pyx_tp_as_buffer_memoryview = { + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getreadbuffer*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getwritebuffer*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getsegcount*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getcharbuffer*/ + #endif + __pyx_memoryview_getbuffer, /*bf_getbuffer*/ + 0, /*bf_releasebuffer*/ +}; + +static PyTypeObject __pyx_type___pyx_memoryview = { + PyVarObject_HEAD_INIT(0, 0) + "monotonic_align.core.memoryview", /*tp_name*/ + sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ + #if PY_VERSION_HEX < 0x030800b4 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 + 0, /*tp_vectorcall_offset*/ + #endif + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + #if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ + #endif + #if PY_MAJOR_VERSION >= 3 + 0, /*tp_as_async*/ + #endif + __pyx_memoryview___repr__, /*tp_repr*/ + 0, /*tp_as_number*/ + &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ + &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + __pyx_memoryview___str__, 
/*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + 0, /*tp_doc*/ + __pyx_tp_traverse_memoryview, /*tp_traverse*/ + __pyx_tp_clear_memoryview, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_methods_memoryview, /*tp_methods*/ + 0, /*tp_members*/ + __pyx_getsets_memoryview, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new_memoryview, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ + #if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ + #endif + #if PY_VERSION_HEX >= 0x030800b1 + 0, /*tp_vectorcall*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 + 0, /*tp_print*/ + #endif +}; +static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; + +static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { + struct __pyx_memoryviewslice_obj *p; + PyObject *o = __pyx_tp_new_memoryview(t, a, k); + if (unlikely(!o)) return 0; + p = ((struct __pyx_memoryviewslice_obj *)o); + p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; + p->from_object = Py_None; Py_INCREF(Py_None); + p->from_slice.memview = NULL; + return o; +} + +static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { + struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; + #if CYTHON_USE_TP_FINALIZE + if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { + if (PyObject_CallFinalizerFromDealloc(o)) return; + } + #endif + PyObject_GC_UnTrack(o); + { + PyObject *etype, *eval, *etb; + PyErr_Fetch(&etype, &eval, &etb); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); + __pyx_memoryviewslice___dealloc__(o); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); + PyErr_Restore(etype, eval, etb); + } + Py_CLEAR(p->from_object); + PyObject_GC_Track(o); + __pyx_tp_dealloc_memoryview(o); +} + +static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { + int e; + struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; + e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; + if (p->from_object) { + e = (*v)(p->from_object, a); if (e) return e; + } + return 0; +} + +static int __pyx_tp_clear__memoryviewslice(PyObject *o) { + PyObject* tmp; + struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; + __pyx_tp_clear_memoryview(o); + tmp = ((PyObject*)p->from_object); + p->from_object = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + __PYX_XDEC_MEMVIEW(&p->from_slice, 1); + return 0; +} + +static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); +} + +static PyMethodDef __pyx_methods__memoryviewslice[] = { + {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0}, + {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0}, + {0, 0, 0, 0} +}; + +static struct PyGetSetDef 
__pyx_getsets__memoryviewslice[] = { + {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, + {0, 0, 0, 0, 0} +}; + +static PyTypeObject __pyx_type___pyx_memoryviewslice = { + PyVarObject_HEAD_INIT(0, 0) + "monotonic_align.core._memoryviewslice", /*tp_name*/ + sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ + #if PY_VERSION_HEX < 0x030800b4 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 + 0, /*tp_vectorcall_offset*/ + #endif + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + #if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ + #endif + #if PY_MAJOR_VERSION >= 3 + 0, /*tp_as_async*/ + #endif + #if CYTHON_COMPILING_IN_PYPY + __pyx_memoryview___repr__, /*tp_repr*/ + #else + 0, /*tp_repr*/ + #endif + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + #if CYTHON_COMPILING_IN_PYPY + __pyx_memoryview___str__, /*tp_str*/ + #else + 0, /*tp_str*/ + #endif + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + "Internal class for passing memoryview slices to Python", /*tp_doc*/ + __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ + __pyx_tp_clear__memoryviewslice, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_methods__memoryviewslice, /*tp_methods*/ + 0, /*tp_members*/ + __pyx_getsets__memoryviewslice, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new__memoryviewslice, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ + #if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ + #endif + #if PY_VERSION_HEX >= 0x030800b1 + 0, /*tp_vectorcall*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 + 0, /*tp_print*/ + #endif +}; + +static PyMethodDef __pyx_methods[] = { + {"maximum_path_c", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15monotonic_align_4core_1maximum_path_c, METH_VARARGS|METH_KEYWORDS, 0}, + {0, 0, 0, 0} +}; + +#if PY_MAJOR_VERSION >= 3 +#if CYTHON_PEP489_MULTI_PHASE_INIT +static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ +static int __pyx_pymod_exec_core(PyObject* module); /*proto*/ +static PyModuleDef_Slot __pyx_moduledef_slots[] = { + {Py_mod_create, (void*)__pyx_pymod_create}, + {Py_mod_exec, (void*)__pyx_pymod_exec_core}, + {0, NULL} +}; +#endif + +static struct PyModuleDef __pyx_moduledef = { + PyModuleDef_HEAD_INIT, + "core", + 0, /* m_doc */ + #if CYTHON_PEP489_MULTI_PHASE_INIT + 0, /* m_size */ + #else + -1, /* m_size */ + #endif + __pyx_methods /* m_methods */, + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_moduledef_slots, /* m_slots */ + #else + NULL, /* m_reload */ + #endif + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL /* m_free */ +}; +#endif +#ifndef CYTHON_SMALL_CODE +#if defined(__clang__) + #define CYTHON_SMALL_CODE +#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) + #define CYTHON_SMALL_CODE __attribute__((cold)) +#else + #define CYTHON_SMALL_CODE +#endif +#endif + +static __Pyx_StringTabEntry __pyx_string_tab[] = { + {&__pyx_n_s_ASCII, __pyx_k_ASCII, 
sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, + {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, + {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, + {&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0}, + {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0}, + {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, + {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, + {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, + {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0}, + {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, + {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, + {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, + {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, + {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, + {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, + {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, + {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, + {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, + {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, + {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, + {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, + {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, + {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, + {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, + {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, + {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, + {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, + {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, + {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, + {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, + {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, + {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 
1, 1}, + {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, + {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, + {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, + {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, + {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, + {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, + {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, + {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, + {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, + {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, + {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, + {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, + {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, + {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, + {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, + {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, + {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, + {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, + {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, + {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, + {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, + {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, + {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, + {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, + {&__pyx_n_s_paths, __pyx_k_paths, sizeof(__pyx_k_paths), 0, 0, 1, 1}, + {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, + {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, + {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, + {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, + {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, + {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, + {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, + {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, + {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 
1}, + {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, + {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, + {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, + {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, + {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, + {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, + {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, + {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, + {&__pyx_n_s_t_xs, __pyx_k_t_xs, sizeof(__pyx_k_t_xs), 0, 0, 1, 1}, + {&__pyx_n_s_t_ys, __pyx_k_t_ys, sizeof(__pyx_k_t_ys), 0, 0, 1, 1}, + {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, + {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, + {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, + {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, + {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, + {&__pyx_n_s_values, __pyx_k_values, sizeof(__pyx_k_values), 0, 0, 1, 1}, + {0, 0, 0, 0, 0, 0, 0} +}; +static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { + __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 15, __pyx_L1_error) + __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error) + __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error) + __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error) + __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error) + __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 404, __pyx_L1_error) + __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 613, __pyx_L1_error) + __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 832, __pyx_L1_error) + return 0; + __pyx_L1_error:; + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); + + /* "View.MemoryView":133 + * + * if not self.ndim: + * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< + * + * if itemsize <= 0: + */ + __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 133, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__2); + __Pyx_GIVEREF(__pyx_tuple__2); + + /* "View.MemoryView":136 + * + * if itemsize <= 0: + * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< + * + * if not isinstance(format, bytes): + */ + __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 136, __pyx_L1_error) + 
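+ /* Most cached constants in this function follow the same three-step pattern:
+    pack the message into a one-element tuple, take a tracked reference
+    (GOTREF), then hand ownership to the module (GIVEREF) so every raise site
+    can reuse the tuple without re-allocating it at runtime. */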
__Pyx_GOTREF(__pyx_tuple__3); + __Pyx_GIVEREF(__pyx_tuple__3); + + /* "View.MemoryView":148 + * + * if not self._shape: + * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< + * + * + */ + __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 148, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__4); + __Pyx_GIVEREF(__pyx_tuple__4); + + /* "View.MemoryView":176 + * self.data = malloc(self.len) + * if not self.data: + * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< + * + * if self.dtype_is_object: + */ + __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 176, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__5); + __Pyx_GIVEREF(__pyx_tuple__5); + + /* "View.MemoryView":192 + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * if not (flags & bufmode): + * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< + * info.buf = self.data + * info.len = self.len + */ + __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 192, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__6); + __Pyx_GIVEREF(__pyx_tuple__6); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__7); + __Pyx_GIVEREF(__pyx_tuple__7); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__8); + __Pyx_GIVEREF(__pyx_tuple__8); + + /* "View.MemoryView":418 + * def __setitem__(memoryview self, object index, object value): + * if self.view.readonly: + * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< + * + * have_slices, index = _unellipsify(index, self.view.ndim) + */ + __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 418, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__9); + __Pyx_GIVEREF(__pyx_tuple__9); + + /* "View.MemoryView":495 + * result = struct.unpack(self.view.format, bytesitem) + * except struct.error: + * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< + * else: + * if len(self.view.format) == 1: + */ + __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 495, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__10); + __Pyx_GIVEREF(__pyx_tuple__10); + + /* "View.MemoryView":520 + * def __getbuffer__(self, Py_buffer *info, int flags): + * if flags & PyBUF_WRITABLE and self.view.readonly: + * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< + * + * if flags & PyBUF_ND: + */ + __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if 
(unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 520, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__11); + __Pyx_GIVEREF(__pyx_tuple__11); + + /* "View.MemoryView":570 + * if self.view.strides == NULL: + * + * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< + * + * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) + */ + __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 570, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__12); + __Pyx_GIVEREF(__pyx_tuple__12); + + /* "View.MemoryView":577 + * def suboffsets(self): + * if self.view.suboffsets == NULL: + * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< + * + * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) + */ + __pyx_tuple__13 = PyTuple_New(1); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 577, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__13); + __Pyx_INCREF(__pyx_int_neg_1); + __Pyx_GIVEREF(__pyx_int_neg_1); + PyTuple_SET_ITEM(__pyx_tuple__13, 0, __pyx_int_neg_1); + __Pyx_GIVEREF(__pyx_tuple__13); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__14); + __Pyx_GIVEREF(__pyx_tuple__14); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__15); + __Pyx_GIVEREF(__pyx_tuple__15); + + /* "View.MemoryView":682 + * if item is Ellipsis: + * if not seen_ellipsis: + * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< + * seen_ellipsis = True + * else: + */ + __pyx_slice__16 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__16)) __PYX_ERR(1, 682, __pyx_L1_error) + __Pyx_GOTREF(__pyx_slice__16); + __Pyx_GIVEREF(__pyx_slice__16); + + /* "View.MemoryView":703 + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: + * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< + * + * + */ + __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 703, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__17); + __Pyx_GIVEREF(__pyx_tuple__17); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__18); + __Pyx_GIVEREF(__pyx_tuple__18); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # 
<<<<<<<<<<<<<< + */ + __pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__19); + __Pyx_GIVEREF(__pyx_tuple__19); + + /* "View.MemoryView":286 + * return self.name + * + * cdef generic = Enum("") # <<<<<<<<<<<<<< + * cdef strided = Enum("") # default + * cdef indirect = Enum("") + */ + __pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(1, 286, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__20); + __Pyx_GIVEREF(__pyx_tuple__20); + + /* "View.MemoryView":287 + * + * cdef generic = Enum("") + * cdef strided = Enum("") # default # <<<<<<<<<<<<<< + * cdef indirect = Enum("") + * + */ + __pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(1, 287, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__21); + __Pyx_GIVEREF(__pyx_tuple__21); + + /* "View.MemoryView":288 + * cdef generic = Enum("") + * cdef strided = Enum("") # default + * cdef indirect = Enum("") # <<<<<<<<<<<<<< + * + * + */ + __pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 288, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__22); + __Pyx_GIVEREF(__pyx_tuple__22); + + /* "View.MemoryView":291 + * + * + * cdef contiguous = Enum("") # <<<<<<<<<<<<<< + * cdef indirect_contiguous = Enum("") + * + */ + __pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 291, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__23); + __Pyx_GIVEREF(__pyx_tuple__23); + + /* "View.MemoryView":292 + * + * cdef contiguous = Enum("") + * cdef indirect_contiguous = Enum("") # <<<<<<<<<<<<<< + * + * + */ + __pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 292, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__24); + __Pyx_GIVEREF(__pyx_tuple__24); + + /* "(tree fragment)":1 + * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< + * cdef object __pyx_PickleError + * cdef object __pyx_result + */ + __pyx_tuple__25 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__25); + __Pyx_GIVEREF(__pyx_tuple__25); + __pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) __PYX_ERR(1, 1, __pyx_L1_error) + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { + /* InitThreads.init */ + #ifdef WITH_THREAD +PyEval_InitThreads(); +#endif + +if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) + + if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_neg_1 = 
PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) + return 0; + __pyx_L1_error:; + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ + +static int __Pyx_modinit_global_init_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); + /*--- Global init code ---*/ + generic = Py_None; Py_INCREF(Py_None); + strided = Py_None; Py_INCREF(Py_None); + indirect = Py_None; Py_INCREF(Py_None); + contiguous = Py_None; Py_INCREF(Py_None); + indirect_contiguous = Py_None; Py_INCREF(Py_None); + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_variable_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); + /*--- Variable export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); + /*--- Function export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_type_init_code(void) { + __Pyx_RefNannyDeclarations + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); + /*--- Type init code ---*/ + __pyx_vtabptr_array = &__pyx_vtable_array; + __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; + if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) + #if PY_VERSION_HEX < 0x030800B1 + __pyx_type___pyx_array.tp_print = 0; + #endif + if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) + if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) + __pyx_array_type = &__pyx_type___pyx_array; + if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) + #if PY_VERSION_HEX < 0x030800B1 + __pyx_type___pyx_MemviewEnum.tp_print = 0; + #endif + if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) { + __pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr; + } + if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) + __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; + __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; + __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; + __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; + __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; + 
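+ /* The memoryview vtable is filled slot by slot with C function pointers;
+    _memoryviewslice below copies this vtable into its __pyx_base and then
+    overrides only convert_item_to_object / assign_item_from_object. */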
__pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; + __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; + __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; + __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; + if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) + #if PY_VERSION_HEX < 0x030800B1 + __pyx_type___pyx_memoryview.tp_print = 0; + #endif + if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) { + __pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr; + } + if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) + if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) + __pyx_memoryview_type = &__pyx_type___pyx_memoryview; + __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; + __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; + __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; + __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; + __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; + if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) + #if PY_VERSION_HEX < 0x030800B1 + __pyx_type___pyx_memoryviewslice.tp_print = 0; + #endif + if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) { + __pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr; + } + if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) + if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) + __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} + +static int __Pyx_modinit_type_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); + /*--- Type import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_variable_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); + /*--- Variable import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); + /*--- Function import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + + +#ifndef 
CYTHON_NO_PYINIT_EXPORT +#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC +#elif PY_MAJOR_VERSION < 3 +#ifdef __cplusplus +#define __Pyx_PyMODINIT_FUNC extern "C" void +#else +#define __Pyx_PyMODINIT_FUNC void +#endif +#else +#ifdef __cplusplus +#define __Pyx_PyMODINIT_FUNC extern "C" PyObject * +#else +#define __Pyx_PyMODINIT_FUNC PyObject * +#endif +#endif + + +#if PY_MAJOR_VERSION < 3 +__Pyx_PyMODINIT_FUNC initcore(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC initcore(void) +#else +__Pyx_PyMODINIT_FUNC PyInit_core(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC PyInit_core(void) +#if CYTHON_PEP489_MULTI_PHASE_INIT +{ + return PyModuleDef_Init(&__pyx_moduledef); +} +static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { + #if PY_VERSION_HEX >= 0x030700A1 + static PY_INT64_T main_interpreter_id = -1; + PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); + if (main_interpreter_id == -1) { + main_interpreter_id = current_id; + return (unlikely(current_id == -1)) ? -1 : 0; + } else if (unlikely(main_interpreter_id != current_id)) + #else + static PyInterpreterState *main_interpreter = NULL; + PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; + if (!main_interpreter) { + main_interpreter = current_interpreter; + } else if (unlikely(main_interpreter != current_interpreter)) + #endif + { + PyErr_SetString( + PyExc_ImportError, + "Interpreter change detected - this module can only be loaded into one interpreter per process."); + return -1; + } + return 0; +} +static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { + PyObject *value = PyObject_GetAttrString(spec, from_name); + int result = 0; + if (likely(value)) { + if (allow_none || value != Py_None) { + result = PyDict_SetItemString(moddict, to_name, value); + } + Py_DECREF(value); + } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Clear(); + } else { + result = -1; + } + return result; +} +static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { + PyObject *module = NULL, *moddict, *modname; + if (__Pyx_check_single_interpreter()) + return NULL; + if (__pyx_m) + return __Pyx_NewRef(__pyx_m); + modname = PyObject_GetAttrString(spec, "name"); + if (unlikely(!modname)) goto bad; + module = PyModule_NewObject(modname); + Py_DECREF(modname); + if (unlikely(!module)) goto bad; + moddict = PyModule_GetDict(module); + if (unlikely(!moddict)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; + return module; +bad: + Py_XDECREF(module); + return NULL; +} + + +static CYTHON_SMALL_CODE int __pyx_pymod_exec_core(PyObject *__pyx_pyinit_module) +#endif +#endif +{ + PyObject *__pyx_t_1 = NULL; + static PyThread_type_lock __pyx_t_2[8]; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannyDeclarations + #if CYTHON_PEP489_MULTI_PHASE_INIT + if (__pyx_m) { + if (__pyx_m == __pyx_pyinit_module) return 0; + PyErr_SetString(PyExc_RuntimeError, "Module 'core' has already been imported. 
Re-initialisation is not supported."); + return -1; + } + #elif PY_MAJOR_VERSION >= 3 + if (__pyx_m) return __Pyx_NewRef(__pyx_m); + #endif + #if CYTHON_REFNANNY +__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); +if (!__Pyx_RefNanny) { + PyErr_Clear(); + __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); + if (!__Pyx_RefNanny) + Py_FatalError("failed to import 'refnanny' module"); +} +#endif + __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_core(void)", 0); + if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #ifdef __Pxy_PyFrame_Initialize_Offsets + __Pxy_PyFrame_Initialize_Offsets(); + #endif + __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) + #ifdef __Pyx_CyFunction_USED + if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_FusedFunction_USED + if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Coroutine_USED + if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Generator_USED + if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_AsyncGen_USED + if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_StopAsyncIteration_USED + if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + /*--- Library function declarations ---*/ + /*--- Threads initialization code ---*/ + #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS + #ifdef WITH_THREAD /* Python build with threading support? */ + PyEval_InitThreads(); + #endif + #endif + /*--- Module creation code ---*/ + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_m = __pyx_pyinit_module; + Py_INCREF(__pyx_m); + #else + #if PY_MAJOR_VERSION < 3 + __pyx_m = Py_InitModule4("core", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); + #else + __pyx_m = PyModule_Create(&__pyx_moduledef); + #endif + if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_d); + __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_b); + __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_cython_runtime); + if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); + /*--- Initialize various global constants etc. 
---*/ + if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) + if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + if (__pyx_module_is_main_monotonic_align__core) { + if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + } + #if PY_MAJOR_VERSION >= 3 + { + PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) + if (!PyDict_GetItemString(modules, "monotonic_align.core")) { + if (unlikely(PyDict_SetItemString(modules, "monotonic_align.core", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) + } + } + #endif + /*--- Builtin init code ---*/ + if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Constants init code ---*/ + if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Global type/function init code ---*/ + (void)__Pyx_modinit_global_init_code(); + (void)__Pyx_modinit_variable_export_code(); + (void)__Pyx_modinit_function_export_code(); + if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) + (void)__Pyx_modinit_type_import_code(); + (void)__Pyx_modinit_variable_import_code(); + (void)__Pyx_modinit_function_import_code(); + /*--- Execution code ---*/ + #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) + if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + + /* "monotonic_align/core.pyx":7 + * @cython.boundscheck(False) + * @cython.wraparound(False) + * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< + * cdef int x + * cdef int y + */ + __pyx_k_ = (-1e9); + + /* "monotonic_align/core.pyx":1 + * cimport cython # <<<<<<<<<<<<<< + * from cython.parallel import prange + * + */ + __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "View.MemoryView":209 + * info.obj = self + * + * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< + * + * def __dealloc__(array self): + */ + __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 209, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 209, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + PyType_Modified(__pyx_array_type); + + /* "View.MemoryView":286 + * return self.name + * + * cdef generic = Enum("") # <<<<<<<<<<<<<< + * cdef strided = Enum("") # default + * cdef indirect = Enum("") + */ + __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 286, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_XGOTREF(generic); + __Pyx_DECREF_SET(generic, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_1); + __pyx_t_1 = 0; + + /* "View.MemoryView":287 + * + * cdef generic = Enum("") + * cdef strided = Enum("") # default # <<<<<<<<<<<<<< + * cdef indirect = Enum("") + * + */ + __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__21, NULL); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(1, 287, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_XGOTREF(strided); + __Pyx_DECREF_SET(strided, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_1); + __pyx_t_1 = 0; + + /* "View.MemoryView":288 + * cdef generic = Enum("") + * cdef strided = Enum("") # default + * cdef indirect = Enum("") # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 288, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_XGOTREF(indirect); + __Pyx_DECREF_SET(indirect, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_1); + __pyx_t_1 = 0; + + /* "View.MemoryView":291 + * + * + * cdef contiguous = Enum("") # <<<<<<<<<<<<<< + * cdef indirect_contiguous = Enum("") + * + */ + __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 291, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_XGOTREF(contiguous); + __Pyx_DECREF_SET(contiguous, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_1); + __pyx_t_1 = 0; + + /* "View.MemoryView":292 + * + * cdef contiguous = Enum("") + * cdef indirect_contiguous = Enum("") # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 292, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_XGOTREF(indirect_contiguous); + __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_1); + __pyx_t_1 = 0; + + /* "View.MemoryView":316 + * + * DEF THREAD_LOCKS_PREALLOCATED = 8 + * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< + * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ + * PyThread_allocate_lock(), + */ + __pyx_memoryview_thread_locks_used = 0; + + /* "View.MemoryView":317 + * DEF THREAD_LOCKS_PREALLOCATED = 8 + * cdef int __pyx_memoryview_thread_locks_used = 0 + * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< + * PyThread_allocate_lock(), + * PyThread_allocate_lock(), + */ + __pyx_t_2[0] = PyThread_allocate_lock(); + __pyx_t_2[1] = PyThread_allocate_lock(); + __pyx_t_2[2] = PyThread_allocate_lock(); + __pyx_t_2[3] = PyThread_allocate_lock(); + __pyx_t_2[4] = PyThread_allocate_lock(); + __pyx_t_2[5] = PyThread_allocate_lock(); + __pyx_t_2[6] = PyThread_allocate_lock(); + __pyx_t_2[7] = PyThread_allocate_lock(); + memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_2, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); + + /* "View.MemoryView":549 + * info.obj = self + * + * __pyx_getbuffer = capsule( &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 549, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 549, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + PyType_Modified(__pyx_memoryview_type); + + /* "View.MemoryView":995 + * return self.from_object + * + * __pyx_getbuffer = capsule( &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 995, __pyx_L1_error) + 
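+ /* Same capsule as the two installations above: memoryview and
+    _memoryviewslice both expose the raw getbuffer callback through their
+    tp_dict, and PyType_Modified() invalidates CPython's cached attribute
+    lookups after each mutation. */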
__Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 995, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + PyType_Modified(__pyx_memoryviewslice_type); + + /* "(tree fragment)":1 + * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< + * cdef object __pyx_PickleError + * cdef object __pyx_result + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "(tree fragment)":11 + * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) + * return __pyx_result + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< + * __pyx_result.name = __pyx_state[0] + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): + */ + + /*--- Wrapped vars code ---*/ + + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + if (__pyx_m) { + if (__pyx_d) { + __Pyx_AddTraceback("init monotonic_align.core", __pyx_clineno, __pyx_lineno, __pyx_filename); + } + Py_CLEAR(__pyx_m); + } else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_ImportError, "init monotonic_align.core"); + } + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + #if CYTHON_PEP489_MULTI_PHASE_INIT + return (__pyx_m != NULL) ? 0 : -1; + #elif PY_MAJOR_VERSION >= 3 + return __pyx_m; + #else + return; + #endif +} + +/* --- Runtime support code --- */ +/* Refnanny */ +#if CYTHON_REFNANNY +static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { + PyObject *m = NULL, *p = NULL; + void *r = NULL; + m = PyImport_ImportModule(modname); + if (!m) goto end; + p = PyObject_GetAttrString(m, "RefNannyAPI"); + if (!p) goto end; + r = PyLong_AsVoidPtr(p); +end: + Py_XDECREF(p); + Py_XDECREF(m); + return (__Pyx_RefNannyAPIStruct *)r; +} +#endif + +/* PyObjectGetAttrStr */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro)) + return tp->tp_getattro(obj, attr_name); +#if PY_MAJOR_VERSION < 3 + if (likely(tp->tp_getattr)) + return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); +#endif + return PyObject_GetAttr(obj, attr_name); +} +#endif + +/* GetBuiltinName */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name) { + PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); + if (unlikely(!result)) { + PyErr_Format(PyExc_NameError, +#if PY_MAJOR_VERSION >= 3 + "name '%U' is not defined", name); +#else + "name '%.200s' is not defined", PyString_AS_STRING(name)); +#endif + } + return result; +} + +/* MemviewSliceInit */ +static int +__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, + int ndim, + __Pyx_memviewslice *memviewslice, + int memview_is_new_reference) +{ + __Pyx_RefNannyDeclarations + int i, retval=-1; + Py_buffer *buf = &memview->view; + __Pyx_RefNannySetupContext("init_memviewslice", 0); + if (unlikely(memviewslice->memview || memviewslice->data)) { + PyErr_SetString(PyExc_ValueError, + "memviewslice is already initialized!"); + goto fail; + } + if (buf->strides) { + for (i = 0; i < ndim; i++) { + memviewslice->strides[i] = buf->strides[i]; + } + } else { + Py_ssize_t stride = 
buf->itemsize; + for (i = ndim - 1; i >= 0; i--) { + memviewslice->strides[i] = stride; + stride *= buf->shape[i]; + } + } + for (i = 0; i < ndim; i++) { + memviewslice->shape[i] = buf->shape[i]; + if (buf->suboffsets) { + memviewslice->suboffsets[i] = buf->suboffsets[i]; + } else { + memviewslice->suboffsets[i] = -1; + } + } + memviewslice->memview = memview; + memviewslice->data = (char *)buf->buf; + if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { + Py_INCREF(memview); + } + retval = 0; + goto no_fail; +fail: + memviewslice->memview = 0; + memviewslice->data = 0; + retval = -1; +no_fail: + __Pyx_RefNannyFinishContext(); + return retval; +} +#ifndef Py_NO_RETURN +#define Py_NO_RETURN +#endif +static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN { + va_list vargs; + char msg[200]; +#ifdef HAVE_STDARG_PROTOTYPES + va_start(vargs, fmt); +#else + va_start(vargs); +#endif + vsnprintf(msg, 200, fmt, vargs); + va_end(vargs); + Py_FatalError(msg); +} +static CYTHON_INLINE int +__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, + PyThread_type_lock lock) +{ + int result; + PyThread_acquire_lock(lock, 1); + result = (*acquisition_count)++; + PyThread_release_lock(lock); + return result; +} +static CYTHON_INLINE int +__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, + PyThread_type_lock lock) +{ + int result; + PyThread_acquire_lock(lock, 1); + result = (*acquisition_count)--; + PyThread_release_lock(lock); + return result; +} +static CYTHON_INLINE void +__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) +{ + int first_time; + struct __pyx_memoryview_obj *memview = memslice->memview; + if (unlikely(!memview || (PyObject *) memview == Py_None)) + return; + if (unlikely(__pyx_get_slice_count(memview) < 0)) + __pyx_fatalerror("Acquisition count is %d (line %d)", + __pyx_get_slice_count(memview), lineno); + first_time = __pyx_add_acquisition_count(memview) == 0; + if (unlikely(first_time)) { + if (have_gil) { + Py_INCREF((PyObject *) memview); + } else { + PyGILState_STATE _gilstate = PyGILState_Ensure(); + Py_INCREF((PyObject *) memview); + PyGILState_Release(_gilstate); + } + } +} +static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, + int have_gil, int lineno) { + int last_time; + struct __pyx_memoryview_obj *memview = memslice->memview; + if (unlikely(!memview || (PyObject *) memview == Py_None)) { + memslice->memview = NULL; + return; + } + if (unlikely(__pyx_get_slice_count(memview) <= 0)) + __pyx_fatalerror("Acquisition count is %d (line %d)", + __pyx_get_slice_count(memview), lineno); + last_time = __pyx_sub_acquisition_count(memview) == 1; + memslice->data = NULL; + if (unlikely(last_time)) { + if (have_gil) { + Py_CLEAR(memslice->memview); + } else { + PyGILState_STATE _gilstate = PyGILState_Ensure(); + Py_CLEAR(memslice->memview); + PyGILState_Release(_gilstate); + } + } else { + memslice->memview = NULL; + } +} + +/* RaiseArgTupleInvalid */ +static void __Pyx_RaiseArgtupleInvalid( + const char* func_name, + int exact, + Py_ssize_t num_min, + Py_ssize_t num_max, + Py_ssize_t num_found) +{ + Py_ssize_t num_expected; + const char *more_or_less; + if (num_found < num_min) { + num_expected = num_min; + more_or_less = "at least"; + } else { + num_expected = num_max; + more_or_less = "at most"; + } + if (exact) { + more_or_less = "exactly"; + } + PyErr_Format(PyExc_TypeError, + "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" 
CYTHON_FORMAT_SSIZE_T "d given)", + func_name, more_or_less, num_expected, + (num_expected == 1) ? "" : "s", num_found); +} + +/* RaiseDoubleKeywords */ +static void __Pyx_RaiseDoubleKeywordsError( + const char* func_name, + PyObject* kw_name) +{ + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION >= 3 + "%s() got multiple values for keyword argument '%U'", func_name, kw_name); + #else + "%s() got multiple values for keyword argument '%s'", func_name, + PyString_AsString(kw_name)); + #endif +} + +/* ParseKeywords */ +static int __Pyx_ParseOptionalKeywords( + PyObject *kwds, + PyObject **argnames[], + PyObject *kwds2, + PyObject *values[], + Py_ssize_t num_pos_args, + const char* function_name) +{ + PyObject *key = 0, *value = 0; + Py_ssize_t pos = 0; + PyObject*** name; + PyObject*** first_kw_arg = argnames + num_pos_args; + while (PyDict_Next(kwds, &pos, &key, &value)) { + name = first_kw_arg; + while (*name && (**name != key)) name++; + if (*name) { + values[name-argnames] = value; + continue; + } + name = first_kw_arg; + #if PY_MAJOR_VERSION < 3 + if (likely(PyString_Check(key))) { + while (*name) { + if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) + && _PyString_Eq(**name, key)) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + if ((**argname == key) || ( + (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) + && _PyString_Eq(**argname, key))) { + goto arg_passed_twice; + } + argname++; + } + } + } else + #endif + if (likely(PyUnicode_Check(key))) { + while (*name) { + int cmp = (**name == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : + #endif + PyUnicode_Compare(**name, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + int cmp = (**argname == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : + #endif + PyUnicode_Compare(**argname, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) goto arg_passed_twice; + argname++; + } + } + } else + goto invalid_keyword_type; + if (kwds2) { + if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; + } else { + goto invalid_keyword; + } + } + return 0; +arg_passed_twice: + __Pyx_RaiseDoubleKeywordsError(function_name, key); + goto bad; +invalid_keyword_type: + PyErr_Format(PyExc_TypeError, + "%.200s() keywords must be strings", function_name); + goto bad; +invalid_keyword: + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION < 3 + "%.200s() got an unexpected keyword argument '%.200s'", + function_name, PyString_AsString(key)); + #else + "%s() got an unexpected keyword argument '%U'", + function_name, key); + #endif +bad: + return -1; +} + +/* None */ +static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { + PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); +} + +/* ArgTypeTest */ +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) +{ + if (unlikely(!type)) { + PyErr_SetString(PyExc_SystemError, "Missing type object"); + return 0; + } + else if (exact) { + #if PY_MAJOR_VERSION == 2 + if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; + #endif + } + else { + if (likely(__Pyx_TypeCheck(obj, type))) return 1; + } + PyErr_Format(PyExc_TypeError, + "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", + name, type->tp_name, Py_TYPE(obj)->tp_name); + return 0; +} + +/* PyObjectCall */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { + PyObject *result; + ternaryfunc call = func->ob_type->tp_call; + if (unlikely(!call)) + return PyObject_Call(func, arg, kw); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + result = (*call)(func, arg, kw); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyErrFetchRestore */ +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + tmp_type = tstate->curexc_type; + tmp_value = tstate->curexc_value; + tmp_tb = tstate->curexc_traceback; + tstate->curexc_type = type; + tstate->curexc_value = value; + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +} +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + *type = tstate->curexc_type; + *value = tstate->curexc_value; + *tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +} +#endif + +/* RaiseException */ +#if PY_MAJOR_VERSION < 3 +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, + CYTHON_UNUSED PyObject *cause) { + __Pyx_PyThreadState_declare + Py_XINCREF(type); + if (!value || value == Py_None) + value = NULL; + else + Py_INCREF(value); + if (!tb || tb == Py_None) + tb = NULL; + else { + Py_INCREF(tb); + if (!PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto raise_error; 
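+ /* every failure path above jumps to raise_error, which releases the
+    type/value/tb references acquired at entry before returning */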
+ } + } + if (PyType_Check(type)) { +#if CYTHON_COMPILING_IN_PYPY + if (!value) { + Py_INCREF(Py_None); + value = Py_None; + } +#endif + PyErr_NormalizeException(&type, &value, &tb); + } else { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto raise_error; + } + value = type; + type = (PyObject*) Py_TYPE(type); + Py_INCREF(type); + if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto raise_error; + } + } + __Pyx_PyThreadState_assign + __Pyx_ErrRestore(type, value, tb); + return; +raise_error: + Py_XDECREF(value); + Py_XDECREF(type); + Py_XDECREF(tb); + return; +} +#else +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { + PyObject* owned_instance = NULL; + if (tb == Py_None) { + tb = 0; + } else if (tb && !PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto bad; + } + if (value == Py_None) + value = 0; + if (PyExceptionInstance_Check(type)) { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto bad; + } + value = type; + type = (PyObject*) Py_TYPE(value); + } else if (PyExceptionClass_Check(type)) { + PyObject *instance_class = NULL; + if (value && PyExceptionInstance_Check(value)) { + instance_class = (PyObject*) Py_TYPE(value); + if (instance_class != type) { + int is_subclass = PyObject_IsSubclass(instance_class, type); + if (!is_subclass) { + instance_class = NULL; + } else if (unlikely(is_subclass == -1)) { + goto bad; + } else { + type = instance_class; + } + } + } + if (!instance_class) { + PyObject *args; + if (!value) + args = PyTuple_New(0); + else if (PyTuple_Check(value)) { + Py_INCREF(value); + args = value; + } else + args = PyTuple_Pack(1, value); + if (!args) + goto bad; + owned_instance = PyObject_Call(type, args, NULL); + Py_DECREF(args); + if (!owned_instance) + goto bad; + value = owned_instance; + if (!PyExceptionInstance_Check(value)) { + PyErr_Format(PyExc_TypeError, + "calling %R should have returned an instance of " + "BaseException, not %R", + type, Py_TYPE(value)); + goto bad; + } + } + } else { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto bad; + } + if (cause) { + PyObject *fixed_cause; + if (cause == Py_None) { + fixed_cause = NULL; + } else if (PyExceptionClass_Check(cause)) { + fixed_cause = PyObject_CallObject(cause, NULL); + if (fixed_cause == NULL) + goto bad; + } else if (PyExceptionInstance_Check(cause)) { + fixed_cause = cause; + Py_INCREF(fixed_cause); + } else { + PyErr_SetString(PyExc_TypeError, + "exception causes must derive from " + "BaseException"); + goto bad; + } + PyException_SetCause(value, fixed_cause); + } + PyErr_SetObject(type, value); + if (tb) { +#if CYTHON_COMPILING_IN_PYPY + PyObject *tmp_type, *tmp_value, *tmp_tb; + PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); + Py_INCREF(tb); + PyErr_Restore(tmp_type, tmp_value, tb); + Py_XDECREF(tmp_tb); +#else + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject* tmp_tb = tstate->curexc_traceback; + if (tb != tmp_tb) { + Py_INCREF(tb); + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_tb); + } +#endif + } +bad: + Py_XDECREF(owned_instance); + return; +} +#endif + +/* PyCFunctionFastCall */ +#if CYTHON_FAST_PYCCALL +static CYTHON_INLINE PyObject * 
__Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { + PyCFunctionObject *func = (PyCFunctionObject*)func_obj; + PyCFunction meth = PyCFunction_GET_FUNCTION(func); + PyObject *self = PyCFunction_GET_SELF(func); + int flags = PyCFunction_GET_FLAGS(func); + assert(PyCFunction_Check(func)); + assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); + assert(nargs >= 0); + assert(nargs == 0 || args != NULL); + /* _PyCFunction_FastCallDict() must not be called with an exception set, + because it may clear it (directly or indirectly) and so the + caller loses its exception */ + assert(!PyErr_Occurred()); + if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { + return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); + } else { + return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); + } +} +#endif + +/* PyFunctionFastCall */ +#if CYTHON_FAST_PYCALL +static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, + PyObject *globals) { + PyFrameObject *f; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject **fastlocals; + Py_ssize_t i; + PyObject *result; + assert(globals != NULL); + /* XXX Perhaps we should create a specialized + PyFrame_New() that doesn't take locals, but does + take builtins without sanity checking them. + */ + assert(tstate != NULL); + f = PyFrame_New(tstate, co, globals, NULL); + if (f == NULL) { + return NULL; + } + fastlocals = __Pyx_PyFrame_GetLocalsplus(f); + for (i = 0; i < na; i++) { + Py_INCREF(*args); + fastlocals[i] = *args++; + } + result = PyEval_EvalFrameEx(f,0); + ++tstate->recursion_depth; + Py_DECREF(f); + --tstate->recursion_depth; + return result; +} +#if 1 || PY_VERSION_HEX < 0x030600B1 +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { + PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); + PyObject *globals = PyFunction_GET_GLOBALS(func); + PyObject *argdefs = PyFunction_GET_DEFAULTS(func); + PyObject *closure; +#if PY_MAJOR_VERSION >= 3 + PyObject *kwdefs; +#endif + PyObject *kwtuple, **k; + PyObject **d; + Py_ssize_t nd; + Py_ssize_t nk; + PyObject *result; + assert(kwargs == NULL || PyDict_Check(kwargs)); + nk = kwargs ? 
PyDict_Size(kwargs) : 0; + if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { + return NULL; + } + if ( +#if PY_MAJOR_VERSION >= 3 + co->co_kwonlyargcount == 0 && +#endif + likely(kwargs == NULL || nk == 0) && + co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { + if (argdefs == NULL && co->co_argcount == nargs) { + result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); + goto done; + } + else if (nargs == 0 && argdefs != NULL + && co->co_argcount == Py_SIZE(argdefs)) { + /* function called with no arguments, but all parameters have + a default value: use default values as arguments .*/ + args = &PyTuple_GET_ITEM(argdefs, 0); + result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); + goto done; + } + } + if (kwargs != NULL) { + Py_ssize_t pos, i; + kwtuple = PyTuple_New(2 * nk); + if (kwtuple == NULL) { + result = NULL; + goto done; + } + k = &PyTuple_GET_ITEM(kwtuple, 0); + pos = i = 0; + while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { + Py_INCREF(k[i]); + Py_INCREF(k[i+1]); + i += 2; + } + nk = i / 2; + } + else { + kwtuple = NULL; + k = NULL; + } + closure = PyFunction_GET_CLOSURE(func); +#if PY_MAJOR_VERSION >= 3 + kwdefs = PyFunction_GET_KW_DEFAULTS(func); +#endif + if (argdefs != NULL) { + d = &PyTuple_GET_ITEM(argdefs, 0); + nd = Py_SIZE(argdefs); + } + else { + d = NULL; + nd = 0; + } +#if PY_MAJOR_VERSION >= 3 + result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, + args, (int)nargs, + k, (int)nk, + d, (int)nd, kwdefs, closure); +#else + result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, + args, (int)nargs, + k, (int)nk, + d, (int)nd, closure); +#endif + Py_XDECREF(kwtuple); +done: + Py_LeaveRecursiveCall(); + return result; +} +#endif +#endif + +/* PyObjectCall2Args */ +static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { + PyObject *args, *result = NULL; + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(function)) { + PyObject *args[2] = {arg1, arg2}; + return __Pyx_PyFunction_FastCall(function, args, 2); + } + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(function)) { + PyObject *args[2] = {arg1, arg2}; + return __Pyx_PyCFunction_FastCall(function, args, 2); + } + #endif + args = PyTuple_New(2); + if (unlikely(!args)) goto done; + Py_INCREF(arg1); + PyTuple_SET_ITEM(args, 0, arg1); + Py_INCREF(arg2); + PyTuple_SET_ITEM(args, 1, arg2); + Py_INCREF(function); + result = __Pyx_PyObject_Call(function, args, NULL); + Py_DECREF(args); + Py_DECREF(function); +done: + return result; +} + +/* PyObjectCallMethO */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { + PyObject *self, *result; + PyCFunction cfunc; + cfunc = PyCFunction_GET_FUNCTION(func); + self = PyCFunction_GET_SELF(func); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + result = cfunc(self, arg); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyObjectCallOneArg */ +#if CYTHON_COMPILING_IN_CPYTHON +static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { + PyObject *result; + PyObject *args = PyTuple_New(1); + if (unlikely(!args)) return NULL; + Py_INCREF(arg); + PyTuple_SET_ITEM(args, 0, arg); + result = __Pyx_PyObject_Call(func, args, NULL); + Py_DECREF(args); 
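+ /* generic fallback: the inline __Pyx_PyObject_CallOneArg wrapper below
+    first tries the PyFunction fast call and the METH_O / METH_FASTCALL
+    shortcuts before paying for this temporary 1-tuple. */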
+ return result; +} +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { +#if CYTHON_FAST_PYCALL + if (PyFunction_Check(func)) { + return __Pyx_PyFunction_FastCall(func, &arg, 1); + } +#endif + if (likely(PyCFunction_Check(func))) { + if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { + return __Pyx_PyObject_CallMethO(func, arg); +#if CYTHON_FAST_PYCCALL + } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { + return __Pyx_PyCFunction_FastCall(func, &arg, 1); +#endif + } + } + return __Pyx__PyObject_CallOneArg(func, arg); +} +#else +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { + PyObject *result; + PyObject *args = PyTuple_Pack(1, arg); + if (unlikely(!args)) return NULL; + result = __Pyx_PyObject_Call(func, args, NULL); + Py_DECREF(args); + return result; +} +#endif + +/* BytesEquals */ +static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { +#if CYTHON_COMPILING_IN_PYPY + return PyObject_RichCompareBool(s1, s2, equals); +#else + if (s1 == s2) { + return (equals == Py_EQ); + } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { + const char *ps1, *ps2; + Py_ssize_t length = PyBytes_GET_SIZE(s1); + if (length != PyBytes_GET_SIZE(s2)) + return (equals == Py_NE); + ps1 = PyBytes_AS_STRING(s1); + ps2 = PyBytes_AS_STRING(s2); + if (ps1[0] != ps2[0]) { + return (equals == Py_NE); + } else if (length == 1) { + return (equals == Py_EQ); + } else { + int result; +#if CYTHON_USE_UNICODE_INTERNALS + Py_hash_t hash1, hash2; + hash1 = ((PyBytesObject*)s1)->ob_shash; + hash2 = ((PyBytesObject*)s2)->ob_shash; + if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { + return (equals == Py_NE); + } +#endif + result = memcmp(ps1, ps2, (size_t)length); + return (equals == Py_EQ) ? 
(result == 0) : (result != 0); + } + } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { + return (equals == Py_NE); + } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { + return (equals == Py_NE); + } else { + int result; + PyObject* py_result = PyObject_RichCompare(s1, s2, equals); + if (!py_result) + return -1; + result = __Pyx_PyObject_IsTrue(py_result); + Py_DECREF(py_result); + return result; + } +#endif +} + +/* UnicodeEquals */ +static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { +#if CYTHON_COMPILING_IN_PYPY + return PyObject_RichCompareBool(s1, s2, equals); +#else +#if PY_MAJOR_VERSION < 3 + PyObject* owned_ref = NULL; +#endif + int s1_is_unicode, s2_is_unicode; + if (s1 == s2) { + goto return_eq; + } + s1_is_unicode = PyUnicode_CheckExact(s1); + s2_is_unicode = PyUnicode_CheckExact(s2); +#if PY_MAJOR_VERSION < 3 + if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { + owned_ref = PyUnicode_FromObject(s2); + if (unlikely(!owned_ref)) + return -1; + s2 = owned_ref; + s2_is_unicode = 1; + } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { + owned_ref = PyUnicode_FromObject(s1); + if (unlikely(!owned_ref)) + return -1; + s1 = owned_ref; + s1_is_unicode = 1; + } else if (((!s2_is_unicode) & (!s1_is_unicode))) { + return __Pyx_PyBytes_Equals(s1, s2, equals); + } +#endif + if (s1_is_unicode & s2_is_unicode) { + Py_ssize_t length; + int kind; + void *data1, *data2; + if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) + return -1; + length = __Pyx_PyUnicode_GET_LENGTH(s1); + if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { + goto return_ne; + } +#if CYTHON_USE_UNICODE_INTERNALS + { + Py_hash_t hash1, hash2; + #if CYTHON_PEP393_ENABLED + hash1 = ((PyASCIIObject*)s1)->hash; + hash2 = ((PyASCIIObject*)s2)->hash; + #else + hash1 = ((PyUnicodeObject*)s1)->hash; + hash2 = ((PyUnicodeObject*)s2)->hash; + #endif + if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { + goto return_ne; + } + } +#endif + kind = __Pyx_PyUnicode_KIND(s1); + if (kind != __Pyx_PyUnicode_KIND(s2)) { + goto return_ne; + } + data1 = __Pyx_PyUnicode_DATA(s1); + data2 = __Pyx_PyUnicode_DATA(s2); + if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { + goto return_ne; + } else if (length == 1) { + goto return_eq; + } else { + int result = memcmp(data1, data2, (size_t)(length * kind)); + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + return (equals == Py_EQ) ? 
(result == 0) : (result != 0); + } + } else if ((s1 == Py_None) & s2_is_unicode) { + goto return_ne; + } else if ((s2 == Py_None) & s1_is_unicode) { + goto return_ne; + } else { + int result; + PyObject* py_result = PyObject_RichCompare(s1, s2, equals); + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + if (!py_result) + return -1; + result = __Pyx_PyObject_IsTrue(py_result); + Py_DECREF(py_result); + return result; + } +return_eq: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + return (equals == Py_EQ); +return_ne: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + return (equals == Py_NE); +#endif +} + +/* None */ +static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) { + Py_ssize_t q = a / b; + Py_ssize_t r = a - q*b; + q -= ((r != 0) & ((r ^ b) < 0)); + return q; +} + +/* GetAttr */ +static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { +#if CYTHON_USE_TYPE_SLOTS +#if PY_MAJOR_VERSION >= 3 + if (likely(PyUnicode_Check(n))) +#else + if (likely(PyString_Check(n))) +#endif + return __Pyx_PyObject_GetAttrStr(o, n); +#endif + return PyObject_GetAttr(o, n); +} + +/* GetItemInt */ +static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { + PyObject *r; + if (!j) return NULL; + r = PyObject_GetItem(o, j); + Py_DECREF(j); + return r; +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + Py_ssize_t wrapped_i = i; + if (wraparound & unlikely(i < 0)) { + wrapped_i += PyList_GET_SIZE(o); + } + if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { + PyObject *r = PyList_GET_ITEM(o, wrapped_i); + Py_INCREF(r); + return r; + } + return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); +#else + return PySequence_GetItem(o, i); +#endif +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + Py_ssize_t wrapped_i = i; + if (wraparound & unlikely(i < 0)) { + wrapped_i += PyTuple_GET_SIZE(o); + } + if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { + PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); + Py_INCREF(r); + return r; + } + return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); +#else + return PySequence_GetItem(o, i); +#endif +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS + if (is_list || PyList_CheckExact(o)) { + Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); + if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { + PyObject *r = PyList_GET_ITEM(o, n); + Py_INCREF(r); + return r; + } + } + else if (PyTuple_CheckExact(o)) { + Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o); + if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { + PyObject *r = PyTuple_GET_ITEM(o, n); + Py_INCREF(r); + return r; + } + } else { + PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; + if (likely(m && m->sq_item)) { + if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { + Py_ssize_t l = m->sq_length(o); + if (likely(l >= 0)) { + i += l; + } else { + if (!PyErr_ExceptionMatches(PyExc_OverflowError)) + return NULL; + PyErr_Clear(); + } + } + return m->sq_item(o, i); + } + } +#else + if (is_list || PySequence_Check(o)) { + return PySequence_GetItem(o, i); + } +#endif + return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); +} + +/* ObjectGetItem */ +#if CYTHON_USE_TYPE_SLOTS +static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { + PyObject *runerr; + Py_ssize_t key_value; + PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; + if (unlikely(!(m && m->sq_item))) { + PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); + return NULL; + } + key_value = __Pyx_PyIndex_AsSsize_t(index); + if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { + return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); + } + if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { + PyErr_Clear(); + PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); + } + return NULL; +} +static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { + PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; + if (likely(m && m->mp_subscript)) { + return m->mp_subscript(obj, key); + } + return __Pyx_PyObject_GetIndex(obj, key); +} +#endif + +/* decode_c_string */ +static CYTHON_INLINE PyObject* __Pyx_decode_c_string( + const char* cstring, Py_ssize_t start, Py_ssize_t stop, + const char* encoding, const char* errors, + PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { + Py_ssize_t length; + if (unlikely((start < 0) | (stop < 0))) { + size_t slen = strlen(cstring); + if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) { + PyErr_SetString(PyExc_OverflowError, + "c-string too long to convert to Python"); + return NULL; + } + length = (Py_ssize_t) slen; + if (start < 0) { + start += length; + if (start < 0) + start = 0; + } + if (stop < 0) + stop += length; + } + if (unlikely(stop <= start)) + return __Pyx_NewRef(__pyx_empty_unicode); + length = stop - start; + cstring += start; + if (decode_func) { + return decode_func(cstring, length, errors); + } else { + return PyUnicode_Decode(cstring, length, encoding, errors); + } +} + +/* PyErrExceptionMatches */ +#if CYTHON_FAST_THREAD_STATE +static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(tuple); +#if PY_MAJOR_VERSION >= 3 + for (i=0; icurexc_type; + if (exc_type == err) return 1; + if (unlikely(!exc_type)) return 0; + if (unlikely(PyTuple_Check(err))) + return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); + return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); +} +#endif + +/* GetAttr3 */ +static PyObject *__Pyx_GetAttr3Default(PyObject *d) { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) + return NULL; + __Pyx_PyErr_Clear(); + Py_INCREF(d); + return d; +} +static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { + PyObject *r = __Pyx_GetAttr(o, n); + return (likely(r)) ? 
r : __Pyx_GetAttr3Default(d); +} + +/* PyDictVersioning */ +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; +} +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { + PyObject **dictptr = NULL; + Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; + if (offset) { +#if CYTHON_COMPILING_IN_CPYTHON + dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); +#else + dictptr = _PyObject_GetDictPtr(obj); +#endif + } + return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; +} +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) + return 0; + return obj_dict_version == __Pyx_get_object_dict_version(obj); +} +#endif + +/* GetModuleGlobalName */ +#if CYTHON_USE_DICT_VERSIONS +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) +#else +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) +#endif +{ + PyObject *result; +#if !CYTHON_AVOID_BORROWED_REFS +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 + result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } else if (unlikely(PyErr_Occurred())) { + return NULL; + } +#else + result = PyDict_GetItem(__pyx_d, name); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } +#endif +#else + result = PyObject_GetItem(__pyx_d, name); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } + PyErr_Clear(); +#endif + return __Pyx_GetBuiltinName(name); +} + +/* RaiseTooManyValuesToUnpack */ +static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { + PyErr_Format(PyExc_ValueError, + "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); +} + +/* RaiseNeedMoreValuesToUnpack */ +static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { + PyErr_Format(PyExc_ValueError, + "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", + index, (index == 1) ? 
"" : "s"); +} + +/* RaiseNoneIterError */ +static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); +} + +/* ExtTypeTest */ +static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { + if (unlikely(!type)) { + PyErr_SetString(PyExc_SystemError, "Missing type object"); + return 0; + } + if (likely(__Pyx_TypeCheck(obj, type))) + return 1; + PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", + Py_TYPE(obj)->tp_name, type->tp_name); + return 0; +} + +/* GetTopmostException */ +#if CYTHON_USE_EXC_INFO_STACK +static _PyErr_StackItem * +__Pyx_PyErr_GetTopmostException(PyThreadState *tstate) +{ + _PyErr_StackItem *exc_info = tstate->exc_info; + while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && + exc_info->previous_item != NULL) + { + exc_info = exc_info->previous_item; + } + return exc_info; +} +#endif + +/* SaveResetException */ +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); + *type = exc_info->exc_type; + *value = exc_info->exc_value; + *tb = exc_info->exc_traceback; + #else + *type = tstate->exc_type; + *value = tstate->exc_value; + *tb = tstate->exc_traceback; + #endif + Py_XINCREF(*type); + Py_XINCREF(*value); + Py_XINCREF(*tb); +} +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type = type; + exc_info->exc_value = value; + exc_info->exc_traceback = tb; + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = type; + tstate->exc_value = value; + tstate->exc_traceback = tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +} +#endif + +/* GetException */ +#if CYTHON_FAST_THREAD_STATE +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) +#endif +{ + PyObject *local_type, *local_value, *local_tb; +#if CYTHON_FAST_THREAD_STATE + PyObject *tmp_type, *tmp_value, *tmp_tb; + local_type = tstate->curexc_type; + local_value = tstate->curexc_value; + local_tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +#else + PyErr_Fetch(&local_type, &local_value, &local_tb); +#endif + PyErr_NormalizeException(&local_type, &local_value, &local_tb); +#if CYTHON_FAST_THREAD_STATE + if (unlikely(tstate->curexc_type)) +#else + if (unlikely(PyErr_Occurred())) +#endif + goto bad; + #if PY_MAJOR_VERSION >= 3 + if (local_tb) { + if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) + goto bad; + } + #endif + Py_XINCREF(local_tb); + Py_XINCREF(local_type); + Py_XINCREF(local_value); + *type = local_type; + *value = local_value; + *tb = local_tb; +#if CYTHON_FAST_THREAD_STATE + #if CYTHON_USE_EXC_INFO_STACK + { + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = 
exc_info->exc_traceback; + exc_info->exc_type = local_type; + exc_info->exc_value = local_value; + exc_info->exc_traceback = local_tb; + } + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = local_type; + tstate->exc_value = local_value; + tstate->exc_traceback = local_tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +#else + PyErr_SetExcInfo(local_type, local_value, local_tb); +#endif + return 0; +bad: + *type = 0; + *value = 0; + *tb = 0; + Py_XDECREF(local_type); + Py_XDECREF(local_value); + Py_XDECREF(local_tb); + return -1; +} + +/* SwapException */ +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type = *type; + exc_info->exc_value = *value; + exc_info->exc_traceback = *tb; + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = *type; + tstate->exc_value = *value; + tstate->exc_traceback = *tb; + #endif + *type = tmp_type; + *value = tmp_value; + *tb = tmp_tb; +} +#else +static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); + PyErr_SetExcInfo(*type, *value, *tb); + *type = tmp_type; + *value = tmp_value; + *tb = tmp_tb; +} +#endif + +/* Import */ +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { + PyObject *empty_list = 0; + PyObject *module = 0; + PyObject *global_dict = 0; + PyObject *empty_dict = 0; + PyObject *list; + #if PY_MAJOR_VERSION < 3 + PyObject *py_import; + py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); + if (!py_import) + goto bad; + #endif + if (from_list) + list = from_list; + else { + empty_list = PyList_New(0); + if (!empty_list) + goto bad; + list = empty_list; + } + global_dict = PyModule_GetDict(__pyx_m); + if (!global_dict) + goto bad; + empty_dict = PyDict_New(); + if (!empty_dict) + goto bad; + { + #if PY_MAJOR_VERSION >= 3 + if (level == -1) { + if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, 1); + if (!module) { + if (!PyErr_ExceptionMatches(PyExc_ImportError)) + goto bad; + PyErr_Clear(); + } + } + level = 0; + } + #endif + if (!module) { + #if PY_MAJOR_VERSION < 3 + PyObject *py_level = PyInt_FromLong(level); + if (!py_level) + goto bad; + module = PyObject_CallFunctionObjArgs(py_import, + name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); + Py_DECREF(py_level); + #else + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, level); + #endif + } + } +bad: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(py_import); + #endif + Py_XDECREF(empty_list); + Py_XDECREF(empty_dict); + return module; +} + +/* FastTypeChecks */ +#if CYTHON_COMPILING_IN_CPYTHON +static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { + while (a) { + a = a->tp_base; + if (a == b) + return 1; + } + return b == &PyBaseObject_Type; +} +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { + PyObject *mro; + if (a == b) return 1; + mro = 
a->tp_mro; + if (likely(mro)) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(mro); + for (i = 0; i < n; i++) { + if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) + return 1; + } + return 0; + } + return __Pyx_InBases(a, b); +} +#if PY_MAJOR_VERSION == 2 +static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { + PyObject *exception, *value, *tb; + int res; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&exception, &value, &tb); + res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + if (!res) { + res = PyObject_IsSubclass(err, exc_type2); + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + } + __Pyx_ErrRestore(exception, value, tb); + return res; +} +#else +static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { + int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; + if (!res) { + res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); + } + return res; +} +#endif +static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { + Py_ssize_t i, n; + assert(PyExceptionClass_Check(exc_type)); + n = PyTuple_GET_SIZE(tuple); +#if PY_MAJOR_VERSION >= 3 + for (i=0; i= 0 || (x^b) >= 0)) + return PyInt_FromLong(x); + return PyLong_Type.tp_as_number->nb_add(op1, op2); + } + #endif + #if CYTHON_USE_PYLONG_INTERNALS + if (likely(PyLong_CheckExact(op1))) { + const long b = intval; + long a, x; +#ifdef HAVE_LONG_LONG + const PY_LONG_LONG llb = intval; + PY_LONG_LONG lla, llx; +#endif + const digit* digits = ((PyLongObject*)op1)->ob_digit; + const Py_ssize_t size = Py_SIZE(op1); + if (likely(__Pyx_sst_abs(size) <= 1)) { + a = likely(size) ? 
digits[0] : 0; + if (size == -1) a = -a; + } else { + switch (size) { + case -2: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { + lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + case 2: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { + lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + case -3: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { + lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + case 3: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { + lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + case -4: + if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { + lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + case 4: + if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + break; +#ifdef HAVE_LONG_LONG + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { + lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + goto long_long; +#endif + } + CYTHON_FALLTHROUGH; + default: return PyLong_Type.tp_as_number->nb_add(op1, op2); + } + } + x = a + b; + return PyLong_FromLong(x); +#ifdef HAVE_LONG_LONG + long_long: + llx = lla + llb; + return PyLong_FromLongLong(llx); +#endif + + + } + #endif + if (PyFloat_CheckExact(op1)) { + const long b = intval; + double a = PyFloat_AS_DOUBLE(op1); + double result; + PyFPE_START_PROTECT("add", return NULL) + 
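/* Float fast path: op1 is an exact float, so the C long operand is
+     * promoted to double and the sum is computed in double precision,
+     * skipping the generic PyNumber_Add dispatch below. */
+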
result = ((double)a) + (double)b; + PyFPE_END_PROTECT(result) + return PyFloat_FromDouble(result); + } + return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); +} +#endif + +/* None */ +static CYTHON_INLINE long __Pyx_div_long(long a, long b) { + long q = a / b; + long r = a - q*b; + q -= ((r != 0) & ((r ^ b) < 0)); + return q; +} + +/* ImportFrom */ +static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { + PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); + if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Format(PyExc_ImportError, + #if PY_MAJOR_VERSION < 3 + "cannot import name %.230s", PyString_AS_STRING(name)); + #else + "cannot import name %S", name); + #endif + } + return value; +} + +/* HasAttr */ +static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { + PyObject *r; + if (unlikely(!__Pyx_PyBaseString_Check(n))) { + PyErr_SetString(PyExc_TypeError, + "hasattr(): attribute name must be string"); + return -1; + } + r = __Pyx_GetAttr(o, n); + if (unlikely(!r)) { + PyErr_Clear(); + return 0; + } else { + Py_DECREF(r); + return 1; + } +} + +/* PyObject_GenericGetAttrNoDict */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { + PyErr_Format(PyExc_AttributeError, +#if PY_MAJOR_VERSION >= 3 + "'%.50s' object has no attribute '%U'", + tp->tp_name, attr_name); +#else + "'%.50s' object has no attribute '%.400s'", + tp->tp_name, PyString_AS_STRING(attr_name)); +#endif + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { + PyObject *descr; + PyTypeObject *tp = Py_TYPE(obj); + if (unlikely(!PyString_Check(attr_name))) { + return PyObject_GenericGetAttr(obj, attr_name); + } + assert(!tp->tp_dictoffset); + descr = _PyType_Lookup(tp, attr_name); + if (unlikely(!descr)) { + return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); + } + Py_INCREF(descr); + #if PY_MAJOR_VERSION < 3 + if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) + #endif + { + descrgetfunc f = Py_TYPE(descr)->tp_descr_get; + if (unlikely(f)) { + PyObject *res = f(descr, obj, (PyObject *)tp); + Py_DECREF(descr); + return res; + } + } + return descr; +} +#endif + +/* PyObject_GenericGetAttr */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { + if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { + return PyObject_GenericGetAttr(obj, attr_name); + } + return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); +} +#endif + +/* SetVTable */ +static int __Pyx_SetVtable(PyObject *dict, void *vtable) { +#if PY_VERSION_HEX >= 0x02070000 + PyObject *ob = PyCapsule_New(vtable, 0, 0); +#else + PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); +#endif + if (!ob) + goto bad; + if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) + goto bad; + Py_DECREF(ob); + return 0; +bad: + Py_XDECREF(ob); + return -1; +} + +/* PyObjectGetAttrStrNoError */ +static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) + __Pyx_PyErr_Clear(); +} +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { + PyObject *result; +#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS 
&& PY_VERSION_HEX >= 0x030700B1 + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { + return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); + } +#endif + result = __Pyx_PyObject_GetAttrStr(obj, attr_name); + if (unlikely(!result)) { + __Pyx_PyObject_GetAttrStr_ClearAttributeError(); + } + return result; +} + +/* SetupReduce */ +static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { + int ret; + PyObject *name_attr; + name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2); + if (likely(name_attr)) { + ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); + } else { + ret = -1; + } + if (unlikely(ret < 0)) { + PyErr_Clear(); + ret = 0; + } + Py_XDECREF(name_attr); + return ret; +} +static int __Pyx_setup_reduce(PyObject* type_obj) { + int ret = 0; + PyObject *object_reduce = NULL; + PyObject *object_reduce_ex = NULL; + PyObject *reduce = NULL; + PyObject *reduce_ex = NULL; + PyObject *reduce_cython = NULL; + PyObject *setstate = NULL; + PyObject *setstate_cython = NULL; +#if CYTHON_USE_PYTYPE_LOOKUP + if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; +#else + if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; +#endif +#if CYTHON_USE_PYTYPE_LOOKUP + object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; +#else + object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; +#endif + reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD; + if (reduce_ex == object_reduce_ex) { +#if CYTHON_USE_PYTYPE_LOOKUP + object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; +#else + object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; +#endif + reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD; + if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { + reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython); + if (likely(reduce_cython)) { + ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; + ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; + } else if (reduce == object_reduce || PyErr_Occurred()) { + goto __PYX_BAD; + } + setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); + if (!setstate) PyErr_Clear(); + if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { + setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython); + if (likely(setstate_cython)) { + ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; + ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; + } else if (!setstate || PyErr_Occurred()) { + goto __PYX_BAD; + } + } + PyType_Modified((PyTypeObject*)type_obj); + } + } + goto __PYX_GOOD; +__PYX_BAD: + if (!PyErr_Occurred()) + PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); + ret = -1; +__PYX_GOOD: 
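+    /* Shared cleanup: under CYTHON_USE_PYTYPE_LOOKUP, object_reduce and
+     * object_reduce_ex come from _PyType_Lookup and are borrowed
+     * references, so they are only released below when they were fetched
+     * via a getattr lookup instead. */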
+#if !CYTHON_USE_PYTYPE_LOOKUP + Py_XDECREF(object_reduce); + Py_XDECREF(object_reduce_ex); +#endif + Py_XDECREF(reduce); + Py_XDECREF(reduce_ex); + Py_XDECREF(reduce_cython); + Py_XDECREF(setstate); + Py_XDECREF(setstate_cython); + return ret; +} + +/* CLineInTraceback */ +#ifndef CYTHON_CLINE_IN_TRACEBACK +static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { + PyObject *use_cline; + PyObject *ptype, *pvalue, *ptraceback; +#if CYTHON_COMPILING_IN_CPYTHON + PyObject **cython_runtime_dict; +#endif + if (unlikely(!__pyx_cython_runtime)) { + return c_line; + } + __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); +#if CYTHON_COMPILING_IN_CPYTHON + cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); + if (likely(cython_runtime_dict)) { + __PYX_PY_DICT_LOOKUP_IF_MODIFIED( + use_cline, *cython_runtime_dict, + __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) + } else +#endif + { + PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); + if (use_cline_obj) { + use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; + Py_DECREF(use_cline_obj); + } else { + PyErr_Clear(); + use_cline = NULL; + } + } + if (!use_cline) { + c_line = 0; + PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); + } + else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { + c_line = 0; + } + __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); + return c_line; +} +#endif + +/* CodeObjectCache */ +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { + int start = 0, mid = 0, end = count - 1; + if (end >= 0 && code_line > entries[end].code_line) { + return count; + } + while (start < end) { + mid = start + (end - start) / 2; + if (code_line < entries[mid].code_line) { + end = mid; + } else if (code_line > entries[mid].code_line) { + start = mid + 1; + } else { + return mid; + } + } + if (code_line <= entries[mid].code_line) { + return mid; + } else { + return mid + 1; + } +} +static PyCodeObject *__pyx_find_code_object(int code_line) { + PyCodeObject* code_object; + int pos; + if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { + return NULL; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { + return NULL; + } + code_object = __pyx_code_cache.entries[pos].code_object; + Py_INCREF(code_object); + return code_object; +} +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { + int pos, i; + __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; + if (unlikely(!code_line)) { + return; + } + if (unlikely(!entries)) { + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); + if (likely(entries)) { + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = 64; + __pyx_code_cache.count = 1; + entries[0].code_line = code_line; + entries[0].code_object = code_object; + Py_INCREF(code_object); + } + return; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { + PyCodeObject* tmp = entries[pos].code_object; + entries[pos].code_object = code_object; + Py_DECREF(tmp); + 
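/* A cache entry for this source line already existed: the new code
+     * object was swapped in above and the stale reference dropped, so the
+     * cache keeps its size and no entries need to be shifted. */
+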
return; + } + if (__pyx_code_cache.count == __pyx_code_cache.max_count) { + int new_max = __pyx_code_cache.max_count + 64; + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( + __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); + if (unlikely(!entries)) { + return; + } + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = new_max; + } + for (i=__pyx_code_cache.count; i>pos; i--) { + entries[i] = entries[i-1]; + } + entries[pos].code_line = code_line; + entries[pos].code_object = code_object; + __pyx_code_cache.count++; + Py_INCREF(code_object); +} + +/* AddTraceback */ +#include "compile.h" +#include "frameobject.h" +#include "traceback.h" +static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( + const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyObject *py_srcfile = 0; + PyObject *py_funcname = 0; + #if PY_MAJOR_VERSION < 3 + py_srcfile = PyString_FromString(filename); + #else + py_srcfile = PyUnicode_FromString(filename); + #endif + if (!py_srcfile) goto bad; + if (c_line) { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #else + py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #endif + } + else { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromString(funcname); + #else + py_funcname = PyUnicode_FromString(funcname); + #endif + } + if (!py_funcname) goto bad; + py_code = __Pyx_PyCode_New( + 0, + 0, + 0, + 0, + 0, + __pyx_empty_bytes, /*PyObject *code,*/ + __pyx_empty_tuple, /*PyObject *consts,*/ + __pyx_empty_tuple, /*PyObject *names,*/ + __pyx_empty_tuple, /*PyObject *varnames,*/ + __pyx_empty_tuple, /*PyObject *freevars,*/ + __pyx_empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + py_line, + __pyx_empty_bytes /*PyObject *lnotab*/ + ); + Py_DECREF(py_srcfile); + Py_DECREF(py_funcname); + return py_code; +bad: + Py_XDECREF(py_srcfile); + Py_XDECREF(py_funcname); + return NULL; +} +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyFrameObject *py_frame = 0; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + if (c_line) { + c_line = __Pyx_CLineForTraceback(tstate, c_line); + } + py_code = __pyx_find_code_object(c_line ? -c_line : py_line); + if (!py_code) { + py_code = __Pyx_CreateCodeObjectForTraceback( + funcname, c_line, py_line, filename); + if (!py_code) goto bad; + __pyx_insert_code_object(c_line ? 
-c_line : py_line, py_code); + } + py_frame = PyFrame_New( + tstate, /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + __pyx_d, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + __Pyx_PyFrame_SetLineNumber(py_frame, py_line); + PyTraceBack_Here(py_frame); +bad: + Py_XDECREF(py_code); + Py_XDECREF(py_frame); +} + +#if PY_MAJOR_VERSION < 3 +static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { + if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); + if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); + if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); + PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); + return -1; +} +static void __Pyx_ReleaseBuffer(Py_buffer *view) { + PyObject *obj = view->obj; + if (!obj) return; + if (PyObject_CheckBuffer(obj)) { + PyBuffer_Release(view); + return; + } + if ((0)) {} + view->obj = NULL; + Py_DECREF(obj); +} +#endif + + +/* MemviewSliceIsContig */ +static int +__pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) +{ + int i, index, step, start; + Py_ssize_t itemsize = mvs.memview->view.itemsize; + if (order == 'F') { + step = 1; + start = 0; + } else { + step = -1; + start = ndim - 1; + } + for (i = 0; i < ndim; i++) { + index = start + step * i; + if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) + return 0; + itemsize *= mvs.shape[index]; + } + return 1; +} + +/* OverlappingSlices */ +static void +__pyx_get_array_memory_extents(__Pyx_memviewslice *slice, + void **out_start, void **out_end, + int ndim, size_t itemsize) +{ + char *start, *end; + int i; + start = end = slice->data; + for (i = 0; i < ndim; i++) { + Py_ssize_t stride = slice->strides[i]; + Py_ssize_t extent = slice->shape[i]; + if (extent == 0) { + *out_start = *out_end = start; + return; + } else { + if (stride > 0) + end += stride * (extent - 1); + else + start += stride * (extent - 1); + } + } + *out_start = start; + *out_end = end + itemsize; +} +static int +__pyx_slices_overlap(__Pyx_memviewslice *slice1, + __Pyx_memviewslice *slice2, + int ndim, size_t itemsize) +{ + void *start1, *end1, *start2, *end2; + __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); + __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); + return (start1 < end2) && (start2 < end1); +} + +/* Capsule */ +static CYTHON_INLINE PyObject * +__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig) +{ + PyObject *cobj; +#if PY_VERSION_HEX >= 0x02070000 + cobj = PyCapsule_New(p, sig, NULL); +#else + cobj = PyCObject_FromVoidPtr(p, NULL); +#endif + return cobj; +} + +/* IsLittleEndian */ +static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) +{ + union { + uint32_t u32; + uint8_t u8[4]; + } S; + S.u32 = 0x01020304; + return S.u8[0] == 4; +} + +/* BufferFormatCheck */ +static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, + __Pyx_BufFmt_StackElem* stack, + __Pyx_TypeInfo* type) { + stack[0].field = &ctx->root; + stack[0].parent_offset = 0; + ctx->root.type = type; + ctx->root.name = "buffer dtype"; + ctx->root.offset = 0; + ctx->head = stack; + ctx->head->field = &ctx->root; + ctx->fmt_offset = 0; + ctx->head->parent_offset = 0; + ctx->new_packmode = '@'; + ctx->enc_packmode = '@'; + ctx->new_count = 1; + ctx->enc_count = 0; + ctx->enc_type = 0; + ctx->is_complex = 0; + ctx->is_valid_array = 0; + 
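/* Parser state is zeroed field by field; the while loop below then
+     * descends through wrapping struct ('S') dtypes so that format
+     * parsing starts at the innermost first scalar field. */
+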
ctx->struct_alignment = 0; + while (type->typegroup == 'S') { + ++ctx->head; + ctx->head->field = type->fields; + ctx->head->parent_offset = 0; + type = type->fields->type; + } +} +static int __Pyx_BufFmt_ParseNumber(const char** ts) { + int count; + const char* t = *ts; + if (*t < '0' || *t > '9') { + return -1; + } else { + count = *t++ - '0'; + while (*t >= '0' && *t <= '9') { + count *= 10; + count += *t++ - '0'; + } + } + *ts = t; + return count; +} +static int __Pyx_BufFmt_ExpectNumber(const char **ts) { + int number = __Pyx_BufFmt_ParseNumber(ts); + if (number == -1) + PyErr_Format(PyExc_ValueError,\ + "Does not understand character buffer dtype format string ('%c')", **ts); + return number; +} +static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { + PyErr_Format(PyExc_ValueError, + "Unexpected format string character: '%c'", ch); +} +static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { + switch (ch) { + case '?': return "'bool'"; + case 'c': return "'char'"; + case 'b': return "'signed char'"; + case 'B': return "'unsigned char'"; + case 'h': return "'short'"; + case 'H': return "'unsigned short'"; + case 'i': return "'int'"; + case 'I': return "'unsigned int'"; + case 'l': return "'long'"; + case 'L': return "'unsigned long'"; + case 'q': return "'long long'"; + case 'Q': return "'unsigned long long'"; + case 'f': return (is_complex ? "'complex float'" : "'float'"); + case 'd': return (is_complex ? "'complex double'" : "'double'"); + case 'g': return (is_complex ? "'complex long double'" : "'long double'"); + case 'T': return "a struct"; + case 'O': return "Python object"; + case 'P': return "a pointer"; + case 's': case 'p': return "a string"; + case 0: return "end"; + default: return "unparseable format string"; + } +} +static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return 2; + case 'i': case 'I': case 'l': case 'L': return 4; + case 'q': case 'Q': return 8; + case 'f': return (is_complex ? 8 : 4); + case 'd': return (is_complex ? 16 : 8); + case 'g': { + PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); + return 0; + } + case 'O': case 'P': return sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(short); + case 'i': case 'I': return sizeof(int); + case 'l': case 'L': return sizeof(long); + #ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(PY_LONG_LONG); + #endif + case 'f': return sizeof(float) * (is_complex ? 2 : 1); + case 'd': return sizeof(double) * (is_complex ? 2 : 1); + case 'g': return sizeof(long double) * (is_complex ? 
2 : 1); + case 'O': case 'P': return sizeof(void*); + default: { + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } + } +} +typedef struct { char c; short x; } __Pyx_st_short; +typedef struct { char c; int x; } __Pyx_st_int; +typedef struct { char c; long x; } __Pyx_st_long; +typedef struct { char c; float x; } __Pyx_st_float; +typedef struct { char c; double x; } __Pyx_st_double; +typedef struct { char c; long double x; } __Pyx_st_longdouble; +typedef struct { char c; void *x; } __Pyx_st_void_p; +#ifdef HAVE_LONG_LONG +typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; +#endif +static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); + case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); + case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); +#ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); +#endif + case 'f': return sizeof(__Pyx_st_float) - sizeof(float); + case 'd': return sizeof(__Pyx_st_double) - sizeof(double); + case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); + case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +/* These are for computing the padding at the end of the struct to align + on the first member of the struct. This will probably the same as above, + but we don't have any guarantees. + */ +typedef struct { short x; char c; } __Pyx_pad_short; +typedef struct { int x; char c; } __Pyx_pad_int; +typedef struct { long x; char c; } __Pyx_pad_long; +typedef struct { float x; char c; } __Pyx_pad_float; +typedef struct { double x; char c; } __Pyx_pad_double; +typedef struct { long double x; char c; } __Pyx_pad_longdouble; +typedef struct { void *x; char c; } __Pyx_pad_void_p; +#ifdef HAVE_LONG_LONG +typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; +#endif +static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); + case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); + case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); +#ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); +#endif + case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); + case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); + case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); + case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { + switch (ch) { + case 'c': + return 'H'; + case 'b': case 'h': case 'i': + case 'l': case 'q': case 's': case 'p': + return 'I'; + case '?': case 'B': case 'H': case 'I': case 'L': case 'Q': + return 'U'; + case 'f': case 'd': case 'g': + return (is_complex ? 
'C' : 'R'); + case 'O': + return 'O'; + case 'P': + return 'P'; + default: { + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } + } +} +static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { + if (ctx->head == NULL || ctx->head->field == &ctx->root) { + const char* expected; + const char* quote; + if (ctx->head == NULL) { + expected = "end"; + quote = ""; + } else { + expected = ctx->head->field->type->name; + quote = "'"; + } + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch, expected %s%s%s but got %s", + quote, expected, quote, + __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); + } else { + __Pyx_StructField* field = ctx->head->field; + __Pyx_StructField* parent = (ctx->head - 1)->field; + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", + field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), + parent->type->name, field->name); + } +} +static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { + char group; + size_t size, offset, arraysize = 1; + if (ctx->enc_type == 0) return 0; + if (ctx->head->field->type->arraysize[0]) { + int i, ndim = 0; + if (ctx->enc_type == 's' || ctx->enc_type == 'p') { + ctx->is_valid_array = ctx->head->field->type->ndim == 1; + ndim = 1; + if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { + PyErr_Format(PyExc_ValueError, + "Expected a dimension of size %zu, got %zu", + ctx->head->field->type->arraysize[0], ctx->enc_count); + return -1; + } + } + if (!ctx->is_valid_array) { + PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", + ctx->head->field->type->ndim, ndim); + return -1; + } + for (i = 0; i < ctx->head->field->type->ndim; i++) { + arraysize *= ctx->head->field->type->arraysize[i]; + } + ctx->is_valid_array = 0; + ctx->enc_count = 1; + } + group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); + do { + __Pyx_StructField* field = ctx->head->field; + __Pyx_TypeInfo* type = field->type; + if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { + size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); + } else { + size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); + } + if (ctx->enc_packmode == '@') { + size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); + size_t align_mod_offset; + if (align_at == 0) return -1; + align_mod_offset = ctx->fmt_offset % align_at; + if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; + if (ctx->struct_alignment == 0) + ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, + ctx->is_complex); + } + if (type->size != size || type->typegroup != group) { + if (type->typegroup == 'C' && type->fields != NULL) { + size_t parent_offset = ctx->head->parent_offset + field->offset; + ++ctx->head; + ctx->head->field = type->fields; + ctx->head->parent_offset = parent_offset; + continue; + } + if ((type->typegroup == 'H' || group == 'H') && type->size == size) { + } else { + __Pyx_BufFmt_RaiseExpected(ctx); + return -1; + } + } + offset = ctx->head->parent_offset + field->offset; + if (ctx->fmt_offset != offset) { + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", + (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); + return -1; + } + ctx->fmt_offset += size; + if (arraysize) + ctx->fmt_offset += (arraysize - 1) * size; + --ctx->enc_count; + while (1) { + if 
(field == &ctx->root) { + ctx->head = NULL; + if (ctx->enc_count != 0) { + __Pyx_BufFmt_RaiseExpected(ctx); + return -1; + } + break; + } + ctx->head->field = ++field; + if (field->type == NULL) { + --ctx->head; + field = ctx->head->field; + continue; + } else if (field->type->typegroup == 'S') { + size_t parent_offset = ctx->head->parent_offset + field->offset; + if (field->type->fields->type == NULL) continue; + field = field->type->fields; + ++ctx->head; + ctx->head->field = field; + ctx->head->parent_offset = parent_offset; + break; + } else { + break; + } + } + } while (ctx->enc_count); + ctx->enc_type = 0; + ctx->is_complex = 0; + return 0; +} +static PyObject * +__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) +{ + const char *ts = *tsp; + int i = 0, number, ndim; + ++ts; + if (ctx->new_count != 1) { + PyErr_SetString(PyExc_ValueError, + "Cannot handle repeated arrays in format string"); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ndim = ctx->head->field->type->ndim; + while (*ts && *ts != ')') { + switch (*ts) { + case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; + default: break; + } + number = __Pyx_BufFmt_ExpectNumber(&ts); + if (number == -1) return NULL; + if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) + return PyErr_Format(PyExc_ValueError, + "Expected a dimension of size %zu, got %d", + ctx->head->field->type->arraysize[i], number); + if (*ts != ',' && *ts != ')') + return PyErr_Format(PyExc_ValueError, + "Expected a comma in format string, got '%c'", *ts); + if (*ts == ',') ts++; + i++; + } + if (i != ndim) + return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", + ctx->head->field->type->ndim, i); + if (!*ts) { + PyErr_SetString(PyExc_ValueError, + "Unexpected end of format string, expected ')'"); + return NULL; + } + ctx->is_valid_array = 1; + ctx->new_count = 1; + *tsp = ++ts; + return Py_None; +} +static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { + int got_Z = 0; + while (1) { + switch(*ts) { + case 0: + if (ctx->enc_type != 0 && ctx->head == NULL) { + __Pyx_BufFmt_RaiseExpected(ctx); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + if (ctx->head != NULL) { + __Pyx_BufFmt_RaiseExpected(ctx); + return NULL; + } + return ts; + case ' ': + case '\r': + case '\n': + ++ts; + break; + case '<': + if (!__Pyx_Is_Little_Endian()) { + PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); + return NULL; + } + ctx->new_packmode = '='; + ++ts; + break; + case '>': + case '!': + if (__Pyx_Is_Little_Endian()) { + PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); + return NULL; + } + ctx->new_packmode = '='; + ++ts; + break; + case '=': + case '@': + case '^': + ctx->new_packmode = *ts++; + break; + case 'T': + { + const char* ts_after_sub; + size_t i, struct_count = ctx->new_count; + size_t struct_alignment = ctx->struct_alignment; + ctx->new_count = 1; + ++ts; + if (*ts != '{') { + PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_type = 0; + ctx->enc_count = 0; + ctx->struct_alignment = 0; + ++ts; + ts_after_sub = ts; + for (i = 0; i != struct_count; ++i) { + ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); + if (!ts_after_sub) return NULL; + } + ts = ts_after_sub; + if 
(struct_alignment) ctx->struct_alignment = struct_alignment; + } + break; + case '}': + { + size_t alignment = ctx->struct_alignment; + ++ts; + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_type = 0; + if (alignment && ctx->fmt_offset % alignment) { + ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); + } + } + return ts; + case 'x': + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->fmt_offset += ctx->new_count; + ctx->new_count = 1; + ctx->enc_count = 0; + ctx->enc_type = 0; + ctx->enc_packmode = ctx->new_packmode; + ++ts; + break; + case 'Z': + got_Z = 1; + ++ts; + if (*ts != 'f' && *ts != 'd' && *ts != 'g') { + __Pyx_BufFmt_RaiseUnexpectedChar('Z'); + return NULL; + } + CYTHON_FALLTHROUGH; + case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': + case 'l': case 'L': case 'q': case 'Q': + case 'f': case 'd': case 'g': + case 'O': case 'p': + if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) && + (ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) { + ctx->enc_count += ctx->new_count; + ctx->new_count = 1; + got_Z = 0; + ++ts; + break; + } + CYTHON_FALLTHROUGH; + case 's': + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_count = ctx->new_count; + ctx->enc_packmode = ctx->new_packmode; + ctx->enc_type = *ts; + ctx->is_complex = got_Z; + ++ts; + ctx->new_count = 1; + got_Z = 0; + break; + case ':': + ++ts; + while(*ts != ':') ++ts; + ++ts; + break; + case '(': + if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; + break; + default: + { + int number = __Pyx_BufFmt_ExpectNumber(&ts); + if (number == -1) return NULL; + ctx->new_count = (size_t)number; + } + } + } +} + +/* TypeInfoCompare */ + static int +__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) +{ + int i; + if (!a || !b) + return 0; + if (a == b) + return 1; + if (a->size != b->size || a->typegroup != b->typegroup || + a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { + if (a->typegroup == 'H' || b->typegroup == 'H') { + return a->size == b->size; + } else { + return 0; + } + } + if (a->ndim) { + for (i = 0; i < a->ndim; i++) + if (a->arraysize[i] != b->arraysize[i]) + return 0; + } + if (a->typegroup == 'S') { + if (a->flags != b->flags) + return 0; + if (a->fields || b->fields) { + if (!(a->fields && b->fields)) + return 0; + for (i = 0; a->fields[i].type && b->fields[i].type; i++) { + __Pyx_StructField *field_a = a->fields + i; + __Pyx_StructField *field_b = b->fields + i; + if (field_a->offset != field_b->offset || + !__pyx_typeinfo_cmp(field_a->type, field_b->type)) + return 0; + } + return !a->fields[i].type && !b->fields[i].type; + } + } + return 1; +} + +/* MemviewSliceValidateAndInit */ + static int +__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) +{ + if (buf->shape[dim] <= 1) + return 1; + if (buf->strides) { + if (spec & __Pyx_MEMVIEW_CONTIG) { + if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { + if (unlikely(buf->strides[dim] != sizeof(void *))) { + PyErr_Format(PyExc_ValueError, + "Buffer is not indirectly contiguous " + "in dimension %d.", dim); + goto fail; + } + } else if (unlikely(buf->strides[dim] != buf->itemsize)) { + PyErr_SetString(PyExc_ValueError, + "Buffer and memoryview are not contiguous " + "in the same dimension."); + goto fail; + } + } + if (spec & __Pyx_MEMVIEW_FOLLOW) { + Py_ssize_t stride = buf->strides[dim]; + if (stride < 0) + stride = -stride; + if (unlikely(stride < buf->itemsize)) { + PyErr_SetString(PyExc_ValueError, + "Buffer and 
memoryview are not contiguous " + "in the same dimension."); + goto fail; + } + } + } else { + if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) { + PyErr_Format(PyExc_ValueError, + "C-contiguous buffer is not contiguous in " + "dimension %d", dim); + goto fail; + } else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) { + PyErr_Format(PyExc_ValueError, + "C-contiguous buffer is not indirect in " + "dimension %d", dim); + goto fail; + } else if (unlikely(buf->suboffsets)) { + PyErr_SetString(PyExc_ValueError, + "Buffer exposes suboffsets but no strides"); + goto fail; + } + } + return 1; +fail: + return 0; +} +static int +__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) +{ + if (spec & __Pyx_MEMVIEW_DIRECT) { + if (unlikely(buf->suboffsets && buf->suboffsets[dim] >= 0)) { + PyErr_Format(PyExc_ValueError, + "Buffer not compatible with direct access " + "in dimension %d.", dim); + goto fail; + } + } + if (spec & __Pyx_MEMVIEW_PTR) { + if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) { + PyErr_Format(PyExc_ValueError, + "Buffer is not indirectly accessible " + "in dimension %d.", dim); + goto fail; + } + } + return 1; +fail: + return 0; +} +static int +__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) +{ + int i; + if (c_or_f_flag & __Pyx_IS_F_CONTIG) { + Py_ssize_t stride = 1; + for (i = 0; i < ndim; i++) { + if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { + PyErr_SetString(PyExc_ValueError, + "Buffer not fortran contiguous."); + goto fail; + } + stride = stride * buf->shape[i]; + } + } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { + Py_ssize_t stride = 1; + for (i = ndim - 1; i >- 1; i--) { + if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { + PyErr_SetString(PyExc_ValueError, + "Buffer not C contiguous."); + goto fail; + } + stride = stride * buf->shape[i]; + } + } + return 1; +fail: + return 0; +} +static int __Pyx_ValidateAndInit_memviewslice( + int *axes_specs, + int c_or_f_flag, + int buf_flags, + int ndim, + __Pyx_TypeInfo *dtype, + __Pyx_BufFmt_StackElem stack[], + __Pyx_memviewslice *memviewslice, + PyObject *original_obj) +{ + struct __pyx_memoryview_obj *memview, *new_memview; + __Pyx_RefNannyDeclarations + Py_buffer *buf; + int i, spec = 0, retval = -1; + __Pyx_BufFmt_Context ctx; + int from_memoryview = __pyx_memoryview_check(original_obj); + __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); + if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) + original_obj)->typeinfo)) { + memview = (struct __pyx_memoryview_obj *) original_obj; + new_memview = NULL; + } else { + memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( + original_obj, buf_flags, 0, dtype); + new_memview = memview; + if (unlikely(!memview)) + goto fail; + } + buf = &memview->view; + if (unlikely(buf->ndim != ndim)) { + PyErr_Format(PyExc_ValueError, + "Buffer has wrong number of dimensions (expected %d, got %d)", + ndim, buf->ndim); + goto fail; + } + if (new_memview) { + __Pyx_BufFmt_Init(&ctx, stack, dtype); + if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail; + } + if (unlikely((unsigned) buf->itemsize != dtype->size)) { + PyErr_Format(PyExc_ValueError, + "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " + "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", + buf->itemsize, + (buf->itemsize > 1) ? "s" : "", + dtype->name, + dtype->size, + (dtype->size > 1) ? 
"s" : ""); + goto fail; + } + if (buf->len > 0) { + for (i = 0; i < ndim; i++) { + spec = axes_specs[i]; + if (unlikely(!__pyx_check_strides(buf, i, ndim, spec))) + goto fail; + if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec))) + goto fail; + } + if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))) + goto fail; + } + if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, + new_memview != NULL) == -1)) { + goto fail; + } + retval = 0; + goto no_fail; +fail: + Py_XDECREF(new_memview); + retval = -1; +no_fail: + __Pyx_RefNannyFinishContext(); + return retval; +} + +/* ObjectToMemviewSlice */ + static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(PyObject *obj, int writable_flag) { + __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_BufFmt_StackElem stack[1]; + int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; + int retcode; + if (obj == Py_None) { + result.memview = (struct __pyx_memoryview_obj *) Py_None; + return result; + } + retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, + (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 3, + &__Pyx_TypeInfo_int, stack, + &result, obj); + if (unlikely(retcode == -1)) + goto __pyx_fail; + return result; +__pyx_fail: + result.memview = NULL; + result.data = NULL; + return result; +} + +/* ObjectToMemviewSlice */ + static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *obj, int writable_flag) { + __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_BufFmt_StackElem stack[1]; + int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; + int retcode; + if (obj == Py_None) { + result.memview = (struct __pyx_memoryview_obj *) Py_None; + return result; + } + retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, + (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 3, + &__Pyx_TypeInfo_float, stack, + &result, obj); + if (unlikely(retcode == -1)) + goto __pyx_fail; + return result; +__pyx_fail: + result.memview = NULL; + result.data = NULL; + return result; +} + +/* ObjectToMemviewSlice */ + static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *obj, int writable_flag) { + __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_BufFmt_StackElem stack[1]; + int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; + int retcode; + if (obj == Py_None) { + result.memview = (struct __pyx_memoryview_obj *) Py_None; + return result; + } + retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, + (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, + &__Pyx_TypeInfo_int, stack, + &result, obj); + if (unlikely(retcode == -1)) + goto __pyx_fail; + return result; +__pyx_fail: + result.memview = NULL; + result.data = NULL; + return result; +} + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { + const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(int) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(int) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if 
(sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(int) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(int), + little, !is_unsigned); + } +} + +/* CIntFromPyVerify */ + #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) +#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) +#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ + {\ + func_type value = func_value;\ + if (sizeof(target_type) < sizeof(func_type)) {\ + if (unlikely(value != (func_type) (target_type) value)) {\ + func_type zero = 0;\ + if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ + return (target_type) -1;\ + if (is_unsigned && unlikely(value < zero))\ + goto raise_neg_overflow;\ + else\ + goto raise_overflow;\ + }\ + }\ + return (target_type) value;\ + } + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { + const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(long) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(long) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(long) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(long), + little, !is_unsigned); + } +} + +/* MemviewSliceCopyTemplate */ + static __Pyx_memviewslice +__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, + const char *mode, int ndim, + size_t sizeof_dtype, int contig_flag, + int dtype_is_object) +{ + __Pyx_RefNannyDeclarations + int i; + __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; + struct __pyx_memoryview_obj *from_memview = from_mvs->memview; + Py_buffer *buf = &from_memview->view; + PyObject *shape_tuple = NULL; + PyObject *temp_int = NULL; + struct __pyx_array_obj *array_obj = NULL; + struct __pyx_memoryview_obj *memview_obj = NULL; + __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); + for (i = 0; i < ndim; i++) { + if (unlikely(from_mvs->suboffsets[i] >= 0)) { + PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " + "indirect dimensions (axis %d)", i); + goto fail; + } + } + shape_tuple = PyTuple_New(ndim); + if (unlikely(!shape_tuple)) { + goto fail; + } + __Pyx_GOTREF(shape_tuple); + for(i = 0; i < ndim; i++) { + temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); + if(unlikely(!temp_int)) { + goto fail; + } else { + PyTuple_SET_ITEM(shape_tuple, i, temp_int); + temp_int = NULL; + } + } + 
array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); + if (unlikely(!array_obj)) { + goto fail; + } + __Pyx_GOTREF(array_obj); + memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( + (PyObject *) array_obj, contig_flag, + dtype_is_object, + from_mvs->memview->typeinfo); + if (unlikely(!memview_obj)) + goto fail; + if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) + goto fail; + if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, + dtype_is_object) < 0)) + goto fail; + goto no_fail; +fail: + __Pyx_XDECREF(new_mvs.memview); + new_mvs.memview = NULL; + new_mvs.data = NULL; +no_fail: + __Pyx_XDECREF(shape_tuple); + __Pyx_XDECREF(temp_int); + __Pyx_XDECREF(array_obj); + __Pyx_RefNannyFinishContext(); + return new_mvs; +} + +/* CIntFromPy */ + static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { + const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(int) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (int) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { + return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { + return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { + return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (int) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(int) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + 
} else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) + case -2: + if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + } +#endif + if (sizeof(int) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + 
int val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (int) -1; + } + } else { + int val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (int) -1; + val = __Pyx_PyInt_As_int(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to int"); + return (int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to int"); + return (int) -1; +} + +/* CIntFromPy */ + static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { + const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(long) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (long) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { + return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { + return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { + return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (long) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(long) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= 
sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) + case -2: + if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + } +#endif + if (sizeof(long) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { 
+#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + long val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (long) -1; + } + } else { + long val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (long) -1; + val = __Pyx_PyInt_As_long(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to long"); + return (long) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to long"); + return (long) -1; +} + +/* CIntFromPy */ + static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { + const char neg_one = (char) ((char) 0 - (char) 1), const_zero = (char) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(char) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (char) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (char) 0; + case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) + case 2: + if (8 * sizeof(char) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { + return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(char) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { + return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(char) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { + return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (char) -1; + if (unlikely(result == 1)) + goto 
raise_neg_overflow; + } +#endif + if (sizeof(char) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (char) 0; + case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) + case -2: + if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { + return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(char) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { + return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { + return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(char) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { + return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { + return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(char) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { + return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + } +#endif + if (sizeof(char) <= sizeof(long)) { + 
__PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + char val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (char) -1; + } + } else { + char val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (char) -1; + val = __Pyx_PyInt_As_char(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to char"); + return (char) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to char"); + return (char) -1; +} + +/* CheckBinaryVersion */ + static int __Pyx_check_binary_version(void) { + char ctversion[4], rtversion[4]; + PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); + PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); + if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { + char message[200]; + PyOS_snprintf(message, sizeof(message), + "compiletime version %s of module '%.100s' " + "does not match runtime version %s", + ctversion, __Pyx_MODULE_NAME, rtversion); + return PyErr_WarnEx(NULL, message, 1); + } + return 0; +} + +/* InitStrings */ + static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { + while (t->p) { + #if PY_MAJOR_VERSION < 3 + if (t->is_unicode) { + *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); + } else if (t->intern) { + *t->p = PyString_InternFromString(t->s); + } else { + *t->p = PyString_FromStringAndSize(t->s, t->n - 1); + } + #else + if (t->is_unicode | t->is_str) { + if (t->intern) { + *t->p = PyUnicode_InternFromString(t->s); + } else if (t->encoding) { + *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); + } else { + *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); + } + } else { + *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); + } + #endif + if (!*t->p) + return -1; + if (PyObject_Hash(*t->p) == -1) + return -1; + ++t; + } + return 0; +} + +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { + return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); +} +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { + Py_ssize_t ignore; + return __Pyx_PyObject_AsStringAndSize(o, &ignore); +} +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +#if !CYTHON_PEP393_ENABLED +static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + char* defenc_c; + PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); + if (!defenc) return NULL; + defenc_c = PyBytes_AS_STRING(defenc); +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + { + char* end = defenc_c + PyBytes_GET_SIZE(defenc); + char* c; + for (c = defenc_c; c < end; c++) { + if ((unsigned char) (*c) >= 128) { + 
PyUnicode_AsASCIIString(o); + return NULL; + } + } + } +#endif + *length = PyBytes_GET_SIZE(defenc); + return defenc_c; +} +#else +static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + if (likely(PyUnicode_IS_ASCII(o))) { + *length = PyUnicode_GET_LENGTH(o); + return PyUnicode_AsUTF8(o); + } else { + PyUnicode_AsASCIIString(o); + return NULL; + } +#else + return PyUnicode_AsUTF8AndSize(o, length); +#endif +} +#endif +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT + if ( +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + __Pyx_sys_getdefaultencoding_not_ascii && +#endif + PyUnicode_Check(o)) { + return __Pyx_PyUnicode_AsStringAndSize(o, length); + } else +#endif +#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) + if (PyByteArray_Check(o)) { + *length = PyByteArray_GET_SIZE(o); + return PyByteArray_AS_STRING(o); + } else +#endif + { + char* result; + int r = PyBytes_AsStringAndSize(o, &result, length); + if (unlikely(r < 0)) { + return NULL; + } else { + return result; + } + } +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { + int is_true = x == Py_True; + if (is_true | (x == Py_False) | (x == Py_None)) return is_true; + else return PyObject_IsTrue(x); +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { + int retval; + if (unlikely(!x)) return -1; + retval = __Pyx_PyObject_IsTrue(x); + Py_DECREF(x); + return retval; +} +static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { +#if PY_MAJOR_VERSION >= 3 + if (PyLong_Check(result)) { + if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, + "__int__ returned non-int (type %.200s). 
" + "The ability to return an instance of a strict subclass of int " + "is deprecated, and may be removed in a future version of Python.", + Py_TYPE(result)->tp_name)) { + Py_DECREF(result); + return NULL; + } + return result; + } +#endif + PyErr_Format(PyExc_TypeError, + "__%.4s__ returned non-%.4s (type %.200s)", + type_name, type_name, Py_TYPE(result)->tp_name); + Py_DECREF(result); + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { +#if CYTHON_USE_TYPE_SLOTS + PyNumberMethods *m; +#endif + const char *name = NULL; + PyObject *res = NULL; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x) || PyLong_Check(x))) +#else + if (likely(PyLong_Check(x))) +#endif + return __Pyx_NewRef(x); +#if CYTHON_USE_TYPE_SLOTS + m = Py_TYPE(x)->tp_as_number; + #if PY_MAJOR_VERSION < 3 + if (m && m->nb_int) { + name = "int"; + res = m->nb_int(x); + } + else if (m && m->nb_long) { + name = "long"; + res = m->nb_long(x); + } + #else + if (likely(m && m->nb_int)) { + name = "int"; + res = m->nb_int(x); + } + #endif +#else + if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { + res = PyNumber_Int(x); + } +#endif + if (likely(res)) { +#if PY_MAJOR_VERSION < 3 + if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { +#else + if (unlikely(!PyLong_CheckExact(res))) { +#endif + return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); + } + } + else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_TypeError, + "an integer is required"); + } + return res; +} +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { + Py_ssize_t ival; + PyObject *x; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(b))) { + if (sizeof(Py_ssize_t) >= sizeof(long)) + return PyInt_AS_LONG(b); + else + return PyInt_AsSsize_t(b); + } +#endif + if (likely(PyLong_CheckExact(b))) { + #if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)b)->ob_digit; + const Py_ssize_t size = Py_SIZE(b); + if (likely(__Pyx_sst_abs(size) <= 1)) { + ival = likely(size) ? digits[0] : 0; + if (size == -1) ival = -ival; + return ival; + } else { + switch (size) { + case 2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + } + } + #endif + return PyLong_AsSsize_t(b); + } + x = PyNumber_Index(b); + if (!x) return -1; + ival = PyInt_AsSsize_t(x); + Py_DECREF(x); + return ival; +} +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { + return b ? 
__Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
+}
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
+    return PyInt_FromSize_t(ival);
+}
+
+
+#endif /* Py_PYTHON_H */
diff --git a/nemo/collections/tts/modules/monotonic_align/core.pyx b/nemo/collections/tts/modules/monotonic_align/core.pyx
new file mode 100644
index 000000000000..bfaabd4d21c2
--- /dev/null
+++ b/nemo/collections/tts/modules/monotonic_align/core.pyx
@@ -0,0 +1,42 @@
+cimport cython
+from cython.parallel import prange
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil:
+    cdef int x
+    cdef int y
+    cdef float v_prev
+    cdef float v_cur
+    cdef float tmp
+    cdef int index = t_x - 1
+
+    for y in range(t_y):
+        for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
+            if x == y:
+                v_cur = max_neg_val
+            else:
+                v_cur = value[y-1, x]
+            if x == 0:
+                if y == 0:
+                    v_prev = 0.
+                else:
+                    v_prev = max_neg_val
+            else:
+                v_prev = value[y-1, x-1]
+            value[y, x] += max(v_prev, v_cur)
+
+    for y in range(t_y - 1, -1, -1):
+        path[y, index] = 1
+        if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]):
+            index = index - 1
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil:
+    cdef int b = paths.shape[0]
+    cdef int i
+    for i in prange(b, nogil=True):
+        maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i])
diff --git a/nemo/collections/tts/modules/monotonic_align/numba_core.py b/nemo/collections/tts/modules/monotonic_align/numba_core.py
new file mode 100644
index 000000000000..34ffd5fa3e35
--- /dev/null
+++ b/nemo/collections/tts/modules/monotonic_align/numba_core.py
@@ -0,0 +1,67 @@
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
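+
+# The kernels below are a Numba port of the Cython implementation in core.pyx.
+# `maximum_path_each` runs the monotonic-alignment dynamic program for one
+# batch element: it accumulates best-path scores into `value` in place, then
+# backtracks from the last text index to write a 0/1 alignment into `path`.
+# A minimal usage sketch (the array names and shapes here are illustrative,
+# not part of this module): with NumPy imported as `np`, given float32
+# `values` of shape [b, t_y, t_x] and int32 length vectors `t_ys`, `t_xs`,
+#
+#     paths = np.zeros_like(values, dtype=np.int32)
+#     maximum_path_c(paths, values, t_ys, t_xs)
+#
+# would fill `paths` with one monotonic alignment per batch element.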
+
+
+import numba
+
+
+@numba.jit(nopython=True, boundscheck=False, parallel=True)
+def maximum_path_each(path, value, t_y: int, t_x: int, max_neg_val=-1e9):
+    """
+    Args:
+        path: int32[:, :]
+        value: float32[:, :]
+        t_y: int
+        t_x: int
+        max_neg_val: float
+    """
+    index: int = t_x - 1
+
+    for y in range(t_y):
+        for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)):
+            if x == y:
+                v_cur = max_neg_val
+            else:
+                v_cur = value[y - 1, x]
+            if x == 0:
+                if y == 0:
+                    v_prev = 0.0
+                else:
+                    v_prev = max_neg_val
+            else:
+                v_prev = value[y - 1, x - 1]
+            value[y, x] += max(v_prev, v_cur)
+
+    for y in range(t_y - 1, -1, -1):
+        path[y, index] = 1
+        if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]):
+            index = index - 1
+
+
+@numba.jit(nopython=True, boundscheck=False, parallel=True)
+def maximum_path_c(paths, values, t_ys, t_xs):
+    """
+    Args:
+        paths: int32[:, :, :]
+        values: float32[:, :, :]
+        t_ys: int[:]
+        t_xs: int[:]
+    """
+    b: int = paths.shape[0]
+    for i in numba.prange(b):
+        maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i])
+
+
+if __name__ == '__main__':
+    pass
diff --git a/nemo/collections/tts/modules/monotonic_align/setup.py b/nemo/collections/tts/modules/monotonic_align/setup.py
new file mode 100644
index 000000000000..2410fa8237b9
--- /dev/null
+++ b/nemo/collections/tts/modules/monotonic_align/setup.py
@@ -0,0 +1,45 @@
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# MIT License
+#
+# Copyright (c) 2021 Jaehyeon Kim
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
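+
+# Build note: `ext_modules` is left commented out below, so this script does
+# not compile the Cython kernel by itself. Under the assumption that Cython
+# is installed, a build would add
+#
+#     from Cython.Build import cythonize
+#     ext_modules=cythonize("core.pyx"),
+#
+# and run `python setup.py build_ext --inplace` from this directory;
+# otherwise the pure-Python Numba port in numba_core.py can be used instead.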
+
+from distutils.core import setup
+
+import numpy
+
+setup(
+    name='monotonic_align',
+    # ext_modules=cythonize("core.pyx"),
+    include_dirs=[numpy.get_include()],
+)
diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py
new file mode 100644
index 000000000000..99ef7c64c1f6
--- /dev/null
+++ b/nemo/collections/tts/modules/vits_modules.py
@@ -0,0 +1,1294 @@
+# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# MIT License
+#
+# Copyright (c) 2021 Jaehyeon Kim
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
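+
+# The first helper below, `fused_add_tanh_sigmoid_multiply`, implements the
+# WaveNet-style gated activation used throughout the WN blocks:
+#
+#     acts = tanh(in_act[:, :n, :]) * sigmoid(in_act[:, n:, :])
+#
+# where `in_act = input_a + input_b` packs the filter and gate halves along
+# the channel axis and `n` is the number of hidden channels.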
+
+import math
+
+import torch
+import torch.nn as nn
+from torch.nn import functional as F
+from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm
+
+from nemo.collections.tts.helpers.helpers import (
+    convert_pad_shape,
+    generate_path,
+    get_mask_from_lengths,
+    rand_slice_segments,
+)
+from nemo.collections.tts.helpers.splines import piecewise_rational_quadratic_transform
+from nemo.collections.tts.modules.hifigan_modules import ResBlock1, ResBlock2, get_padding, init_weights
+from nemo.collections.tts.modules.monotonic_align import maximum_path
+
+LRELU_SLOPE = 0.1
+
+
+@torch.jit.script
+def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
+    n_channels_int = n_channels[0]
+    in_act = input_a + input_b
+    t_act = torch.tanh(in_act[:, :n_channels_int, :])
+    s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
+    acts = t_act * s_act
+    return acts
+
+
+class LayerNorm(nn.Module):
+    def __init__(self, channels, eps=1e-5):
+        super().__init__()
+        self.channels = channels
+        self.eps = eps
+
+        self.gamma = nn.Parameter(torch.ones(channels))
+        self.beta = nn.Parameter(torch.zeros(channels))
+
+    def forward(self, x):
+        x = x.transpose(1, -1)
+        x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps)
+        return x.transpose(1, -1)
+
+
+class ConvReluNorm(nn.Module):
+    def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
+        super().__init__()
+        self.in_channels = in_channels
+        self.hidden_channels = hidden_channels
+        self.out_channels = out_channels
+        self.kernel_size = kernel_size
+        self.n_layers = n_layers
+        self.p_dropout = p_dropout
+        assert n_layers > 1, "Number of layers should be larger than 1."
+
+        self.conv_layers = nn.ModuleList()
+        self.norm_layers = nn.ModuleList()
+        self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
+        self.norm_layers.append(LayerNorm(hidden_channels))
+        self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout))
+        for _ in range(n_layers - 1):
+            self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
+            self.norm_layers.append(LayerNorm(hidden_channels))
+        self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
+        self.proj.weight.data.zero_()
+        self.proj.bias.data.zero_()
+
+    def forward(self, x, x_mask):
+        x_org = x
+        for i in range(self.n_layers):
+            x = self.conv_layers[i](x * x_mask)
+            x = self.norm_layers[i](x)
+            x = self.relu_drop(x)
+        x = x_org + self.proj(x)
+        return x * x_mask
+
+
+class DDSConv(nn.Module):
+    """
+    Dilated and Depth-Separable Convolution
+    """
+
+    def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
+        super().__init__()
+        self.channels = channels
+        self.kernel_size = kernel_size
+        self.n_layers = n_layers
+        self.p_dropout = p_dropout
+
+        self.drop = nn.Dropout(p_dropout)
+        self.convs_sep = nn.ModuleList()
+        self.convs_1x1 = nn.ModuleList()
+        self.norms_1 = nn.ModuleList()
+        self.norms_2 = nn.ModuleList()
+        for i in range(n_layers):
+            dilation = kernel_size ** i
+            padding = (kernel_size * dilation - dilation) // 2
+            self.convs_sep.append(
+                nn.Conv1d(channels, channels, kernel_size, groups=channels, dilation=dilation, padding=padding)
+            )
+            self.convs_1x1.append(nn.Conv1d(channels, channels, 1))
+            self.norms_1.append(LayerNorm(channels))
+            self.norms_2.append(LayerNorm(channels))
+
+    def forward(self, x, x_mask, g=None):
+        if g is not None:
+            x = x + g
+        for i in range(self.n_layers):
+            y = self.convs_sep[i](x * x_mask)
+            y = self.norms_1[i](y)
+            y = F.gelu(y)
+            y = self.convs_1x1[i](y)
+            y = self.norms_2[i](y)
+            y = F.gelu(y)
+            y = self.drop(y)
+            x = x + y
+        return x * x_mask
+
+
+class WN(torch.nn.Module):
+    def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0):
+        super(WN, self).__init__()
+        assert kernel_size % 2 == 1
+        self.hidden_channels = hidden_channels
+        self.kernel_size = kernel_size
+        self.dilation_rate = dilation_rate
+        self.n_layers = n_layers
+        self.gin_channels = gin_channels
+        self.p_dropout = p_dropout
+
+        self.in_layers = torch.nn.ModuleList()
+        self.res_skip_layers = torch.nn.ModuleList()
+        self.drop = nn.Dropout(p_dropout)
+
+        if gin_channels != 0:
+            cond_layer = torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1)
+            self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
+
+        for i in range(n_layers):
+            dilation = dilation_rate ** i
+            padding = int((kernel_size * dilation - dilation) / 2)
+            in_layer = torch.nn.Conv1d(
+                hidden_channels, 2 * hidden_channels, kernel_size, dilation=dilation, padding=padding
+            )
+            in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
+            self.in_layers.append(in_layer)
+
+            # last one is not necessary
+            if i < n_layers - 1:
+                res_skip_channels = 2 * hidden_channels
+            else:
+                res_skip_channels = hidden_channels
+
+            res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
+            res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
+            self.res_skip_layers.append(res_skip_layer)
+
+    def forward(self, x, x_mask, g=None, **kwargs):
+        output = torch.zeros_like(x)
+        n_channels_tensor = torch.IntTensor([self.hidden_channels])
+
+        if g is not None:
+            g = self.cond_layer(g)
+
+        for i in range(self.n_layers):
+            x_in = self.in_layers[i](x)
+            if g is not None:
+                cond_offset = i * 2 * self.hidden_channels
+                g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :]
+            else:
+                g_l = torch.zeros_like(x_in)
+
+            acts = fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
+            acts = self.drop(acts)
+
+            res_skip_acts = self.res_skip_layers[i](acts)
+            if i < self.n_layers - 1:
+                res_acts = res_skip_acts[:, : self.hidden_channels, :]
+                x = (x + res_acts) * x_mask
+                output = output + res_skip_acts[:, self.hidden_channels :, :]
+            else:
+                output = output + res_skip_acts
+        return output * x_mask
+
+    def remove_weight_norm(self):
+        if self.gin_channels != 0:
+            torch.nn.utils.remove_weight_norm(self.cond_layer)
+        for l in self.in_layers:
+            torch.nn.utils.remove_weight_norm(l)
+        for l in self.res_skip_layers:
+            torch.nn.utils.remove_weight_norm(l)
+
+
+class Log(nn.Module):
+    def forward(self, x, x_mask, reverse=False, **kwargs):
+        if not reverse:
+            y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask
+            logdet = torch.sum(-y, [1, 2])
+            return y, logdet
+        else:
+            x = torch.exp(x) * x_mask
+            return x
+
+
+class Flip(nn.Module):
+    def forward(self, x, *args, reverse=False, **kwargs):
+        x = torch.flip(x, [1])
+        if not reverse:
+            logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
+            return x, logdet
+        else:
+            return x
+
+
+class ElementwiseAffine(nn.Module):
+    def __init__(self, channels):
+        super().__init__()
+        self.channels = channels
+        self.m = nn.Parameter(torch.zeros(channels, 1))
+        self.logs = nn.Parameter(torch.zeros(channels, 1))
+
+    def forward(self, x, x_mask, reverse=False, **kwargs):
+        if not reverse:
+            y = self.m + torch.exp(self.logs) * x
+            y = y * x_mask
+            logdet = torch.sum(self.logs * x_mask, [1, 2])
+            return y, logdet
+        else:
+            x = (x - self.m) * torch.exp(-self.logs) * x_mask
+            return x
+
+
+class ResidualCouplingLayer(nn.Module):
+    def __init__(
+        self,
+        channels,
+        hidden_channels,
+        kernel_size,
+        dilation_rate,
+        n_layers,
+        p_dropout=0,
+        gin_channels=0,
+        mean_only=False,
+    ):
+        assert channels % 2 == 0, "channels should be divisible by 2"
+        super().__init__()
+        self.channels = channels
+        self.hidden_channels = hidden_channels
+        self.kernel_size = kernel_size
+        self.dilation_rate = dilation_rate
+        self.n_layers = n_layers
+        self.half_channels = channels // 2
+        self.mean_only = mean_only
+
+        self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1)
+        self.enc = WN(
+            hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels
+        )
+        self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1)
+        self.post.weight.data.zero_()
+        self.post.bias.data.zero_()
+
+    def forward(self, x, x_mask, g=None, reverse=False):
+        x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
+        h = self.pre(x0) * x_mask
+        h = self.enc(h, x_mask, g=g)
+        stats = self.post(h) * x_mask
+        if not self.mean_only:
+            m, logs = torch.split(stats, [self.half_channels] * 2, 1)
+        else:
+            m = stats
+            logs = torch.zeros_like(m)
+
+        if not reverse:
+            x1 = m + x1 * torch.exp(logs) * x_mask
+            x = torch.cat([x0, x1], 1)
+            logdet = torch.sum(logs, [1, 2])
+            return x, logdet
+        else:
+            x1 = (x1 - m) * torch.exp(-logs) * x_mask
+            x = torch.cat([x0, x1], 1)
+            return x
+
+
+class ConvFlow(nn.Module):
+    def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0):
+        super().__init__()
+        self.in_channels = in_channels
+        self.filter_channels = filter_channels
+        self.kernel_size = kernel_size
+        self.n_layers = n_layers
+        self.num_bins = num_bins
+        self.tail_bound = tail_bound
+        self.half_channels = in_channels // 2
+
+        self.pre = nn.Conv1d(self.half_channels, filter_channels, 1)
+        self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0)
+        self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1)
+        self.proj.weight.data.zero_()
+        self.proj.bias.data.zero_()
+
+    def forward(self, x, x_mask, g=None, reverse=False):
+        x0, x1 = torch.split(x, [self.half_channels] * 2, 1)
+        h = self.pre(x0)
+        h = self.convs(h, x_mask, g=g)
+        h = self.proj(h) * x_mask
+
+        b, c, t = x0.shape
+        h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2)  # [b, c*?, t] -> [b, c, t, ?]
+
+        unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels)
+        unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt(self.filter_channels)
+        unnormalized_derivatives = h[..., 2 * self.num_bins :]
+
+        x1, logabsdet = piecewise_rational_quadratic_transform(
+            x1,
+            unnormalized_widths,
+            unnormalized_heights,
+            unnormalized_derivatives,
+            inverse=reverse,
+            tails='linear',
+            tail_bound=self.tail_bound,
+        )
+
+        x = torch.cat([x0, x1], 1) * x_mask
+        logdet = torch.sum(logabsdet * x_mask, [1, 2])
+        if not reverse:
+            return x, logdet
+        else:
+            return x
+
+
+class StochasticDurationPredictor(nn.Module):
+    def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, n_flows=4, gin_channels=0):
+        super().__init__()
+        filter_channels = in_channels  # NOTE: this override needs to be removed in a future version.
+        self.in_channels = in_channels
+        self.filter_channels = filter_channels
+        self.kernel_size = kernel_size
+        self.p_dropout = p_dropout
+        self.n_flows = n_flows
+        self.gin_channels = gin_channels
+
+        self.log_flow = Log()
+        self.flows = nn.ModuleList()
+        self.flows.append(ElementwiseAffine(2))
+        for i in range(n_flows):
+            self.flows.append(ConvFlow(2, filter_channels, kernel_size, n_layers=3))
+            self.flows.append(Flip())
+
+        self.post_pre = nn.Conv1d(1, filter_channels, 1)
+        self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1)
+        self.post_convs = DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
+        self.post_flows = nn.ModuleList()
+        self.post_flows.append(ElementwiseAffine(2))
+        for i in range(4):
+            self.post_flows.append(ConvFlow(2, filter_channels, kernel_size, n_layers=3))
+            self.post_flows.append(Flip())
+
+        self.pre = nn.Conv1d(in_channels, filter_channels, 1)
+        self.proj = nn.Conv1d(filter_channels, filter_channels, 1)
+        self.convs = DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout)
+        if gin_channels != 0:
+            self.cond = nn.Conv1d(gin_channels, filter_channels, 1)
+
+    def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0):
+        x = torch.detach(x)
+        x = self.pre(x)
+        if g is not None:
+            g = torch.detach(g)
+            x = x + self.cond(g)
+        x = self.convs(x, x_mask)
+        x = self.proj(x) * x_mask
+
+        if not reverse:
+            flows = self.flows
+            assert w is not None
+
+            logdet_tot_q = 0
+            h_w = self.post_pre(w)
+            h_w = self.post_convs(h_w, x_mask)
+            h_w = self.post_proj(h_w) * x_mask
+            e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask
+            z_q = e_q
+            for flow in self.post_flows:
+                z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w))
+                logdet_tot_q += logdet_q
+            z_u, z1 = torch.split(z_q, [1, 1], 1)
+            u = torch.sigmoid(z_u) * x_mask
+            z0 = (w - u) * x_mask
+            logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1, 2])
+            logq = torch.sum(-0.5 * (math.log(2 * math.pi) + (e_q ** 2)) * x_mask, [1, 2]) - logdet_tot_q
+
+            logdet_tot = 0
+            z0, logdet = self.log_flow(z0, x_mask)
+            logdet_tot += logdet
+            z = torch.cat([z0, z1], 1)
+            for flow in flows:
+                z, logdet = flow(z, x_mask, g=x, reverse=reverse)
+                logdet_tot = logdet_tot + logdet
+            nll = torch.sum(0.5 * (math.log(2 * math.pi) + (z ** 2)) * x_mask, [1, 2]) - logdet_tot
+            return nll + logq  # [b]
+        else:
+            flows = list(reversed(self.flows))
+            flows = flows[:-2] + [flows[-1]]  # remove a useless vflow
+            z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale
+            for flow in flows:
+                z = flow(z, x_mask, g=x, reverse=reverse)
+            z0, z1 = torch.split(z, [1, 1], 1)
+            logw = z0
+            return logw
+
+
+class DurationPredictor(nn.Module):
+    def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0):
+        super().__init__()
+
+        self.in_channels = in_channels
+        self.filter_channels = filter_channels
+        self.kernel_size = kernel_size
+        self.p_dropout = p_dropout
+        self.gin_channels = gin_channels
+
+        self.drop = nn.Dropout(p_dropout)
+        self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
+        self.norm_1 = LayerNorm(filter_channels)
+        self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2)
+        self.norm_2 = LayerNorm(filter_channels)
+        self.proj = nn.Conv1d(filter_channels, 1, 1)
+
+        if gin_channels != 0:
+            self.cond = nn.Conv1d(gin_channels, in_channels, 1)
+
+    def forward(self, x, x_mask, g=None):
+        x = torch.detach(x)
+        if g is not None:
+            g = torch.detach(g)
+            x = x + self.cond(g)
+        x = self.conv_1(x * x_mask)
+        x = torch.relu(x)
+        x = self.norm_1(x)
+        x = self.drop(x)
+        x = self.conv_2(x * x_mask)
+        x = torch.relu(x)
+        x = self.norm_2(x)
+        x = self.drop(x)
+        x = self.proj(x * x_mask)
+        return x * x_mask
+
+
+class TextEncoder(nn.Module):
+    def __init__(
+        self,
+        n_vocab,
+        out_channels,
+        hidden_channels,
+        filter_channels,
+        n_heads,
+        n_layers,
+        kernel_size,
+        p_dropout,
+        padding_idx,
+    ):
+        super().__init__()
+        self.n_vocab = n_vocab
+        self.out_channels = out_channels
+        self.hidden_channels = hidden_channels
+        self.filter_channels = filter_channels
+        self.n_heads = n_heads
+        self.n_layers = n_layers
+        self.kernel_size = kernel_size
+        self.p_dropout = p_dropout
+
+        self.emb = nn.Embedding(n_vocab, hidden_channels, padding_idx=padding_idx)
+        nn.init.normal_(self.emb.weight, 0.0, hidden_channels ** -0.5)
+
+        self.encoder = Encoder(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout)
+        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+    def forward(self, x, x_lengths):
+        x = self.emb(x) * math.sqrt(self.hidden_channels)  # [b, t, h]
+        x = torch.transpose(x, 1, -1)  # [b, h, t]
+        x_mask = torch.unsqueeze(get_mask_from_lengths(x_lengths, x.size(2)), 1).to(x.dtype)
+
+        x = self.encoder(x * x_mask, x_mask)
+        stats = self.proj(x) * x_mask
+
+        m, logs = torch.split(stats, self.out_channels, dim=1)
+        return x, m, logs, x_mask
+
+
+class ResidualCouplingBlock(nn.Module):
+    def __init__(self, channels, hidden_channels, kernel_size, dilation_rate, n_layers, n_flows=4, gin_channels=0):
+        super().__init__()
+        self.channels = channels
+        self.hidden_channels = hidden_channels
+        self.kernel_size = kernel_size
+        self.dilation_rate = dilation_rate
+        self.n_layers = n_layers
+        self.n_flows = n_flows
+        self.gin_channels = gin_channels
+
+        self.flows = nn.ModuleList()
+        for i in range(n_flows):
+            self.flows.append(
+                ResidualCouplingLayer(
+                    channels,
+                    hidden_channels,
+                    kernel_size,
+                    dilation_rate,
+                    n_layers,
+                    gin_channels=gin_channels,
+                    mean_only=True,
+                )
+            )
+            self.flows.append(Flip())
+
+    def forward(self, x, x_mask, g=None, reverse=False):
+        if not reverse:
+            for flow in self.flows:
+                x, _ = flow(x, x_mask, g=g, reverse=reverse)
+        else:
+            for flow in reversed(self.flows):
+                x = flow(x, x_mask, g=g, reverse=reverse)
+        return x
+
+
+class PosteriorEncoder(nn.Module):
+    def __init__(
+        self, in_channels, out_channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0
+    ):
+        super().__init__()
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+        self.hidden_channels = hidden_channels
+        self.kernel_size = kernel_size
+        self.dilation_rate = dilation_rate
+        self.n_layers = n_layers
+        self.gin_channels = gin_channels
+
+        self.pre = nn.Conv1d(in_channels, hidden_channels, 1)
+        self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels)
+        self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)
+
+    def forward(self, x, x_lengths, g=None):
+        x_mask = torch.unsqueeze(get_mask_from_lengths(x_lengths, x.size(2)), 1).to(x.dtype).to(device=x.device)
+        x = self.pre(x) * x_mask
+        x = self.enc(x, x_mask, g=g)
+        stats = self.proj(x) * x_mask
+        m, logs = torch.split(stats, self.out_channels, dim=1)
+        z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask
+        return z, m, logs, x_mask
+
+
+class Generator(torch.nn.Module):
+    def __init__(
+        self,
+        initial_channel,
resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=0, + ): + super(Generator, self).__init__() + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + self.conv_pre = nn.Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) + resblock = ResBlock1 if resblock == '1' else ResBlock2 + + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + self.ups.append( + weight_norm( + nn.ConvTranspose1d( + upsample_initial_channel // (2 ** i), + upsample_initial_channel // (2 ** (i + 1)), + k, + u, + padding=(k - u) // 2, + ) + ) + ) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = upsample_initial_channel // (2 ** (i + 1)) + for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): + self.resblocks.append(resblock(ch, k, d)) + + self.conv_post = nn.Conv1d(ch, 1, 7, 1, padding=3, bias=False) + self.ups.apply(init_weights) + + if gin_channels != 0: + self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) + + def forward(self, x, g=None): + x = self.conv_pre(x) + if g is not None: + x = x + self.cond(g) + + for i in range(self.num_upsamples): + x = F.leaky_relu(x, LRELU_SLOPE) + x = self.ups[i](x) + xs = torch.zeros(x.shape, dtype=x.dtype, device=x.device) + for j in range(self.num_kernels): + xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + x = F.leaky_relu(x) + x = self.conv_post(x) + x = torch.tanh(x) + + return x + + def remove_weight_norm(self): + print('Removing weight norm...') + for l in self.ups: + remove_weight_norm(l) + for l in self.resblocks: + l.remove_weight_norm() + + +class DiscriminatorP(torch.nn.Module): + def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): + super(DiscriminatorP, self).__init__() + self.period = period + self.use_spectral_norm = use_spectral_norm + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList( + [ + norm_f(nn.Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(nn.Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(nn.Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(nn.Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), + norm_f(nn.Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), + ] + ) + self.dropout = nn.Dropout(0.3) + self.conv_post = norm_f(nn.Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) + + def forward(self, x): + fmap = [] + + # 1d to 2d + b, c, t = x.shape + if t % self.period != 0: # pad first + n_pad = self.period - (t % self.period) + x = F.pad(x, (0, n_pad), "reflect") + t = t + n_pad + x = x.view(b, c, t // self.period, self.period) + + for l in self.convs: + x = l(x) + x = self.dropout(x) + x = F.leaky_relu(x, LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class DiscriminatorS(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(DiscriminatorS, self).__init__() + norm_f = weight_norm if use_spectral_norm == False else spectral_norm + self.convs = nn.ModuleList( + [ + norm_f(nn.Conv1d(1, 16, 15, 1, padding=7)), + norm_f(nn.Conv1d(16, 64, 41, 4, groups=4, padding=20)), + 
norm_f(nn.Conv1d(64, 256, 41, 4, groups=16, padding=20)), + norm_f(nn.Conv1d(256, 1024, 41, 4, groups=64, padding=20)), + norm_f(nn.Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), + norm_f(nn.Conv1d(1024, 1024, 5, 1, padding=2)), + ] + ) + self.dropout = nn.Dropout(0.3) + self.conv_post = norm_f(nn.Conv1d(1024, 1, 3, 1, padding=1)) + + def forward(self, x): + fmap = [] + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class MultiPeriodDiscriminator(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(MultiPeriodDiscriminator, self).__init__() + periods = [2, 3, 5, 7, 11] + + discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] + discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] + self.discriminators = nn.ModuleList(discs) + + def forward(self, y, y_hat): + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + y_d_rs.append(y_d_r) + y_d_gs.append(y_d_g) + fmap_rs.append(fmap_r) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +class SynthesizerTrn(nn.Module): + """ + Synthesizer for Training + """ + + def __init__( + self, + n_vocab, + spec_channels, + segment_size, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + padding_idx, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + n_speakers=0, + gin_channels=0, + use_sdp=True, + **kwargs + ): + + super().__init__() + self.n_vocab = n_vocab + self.spec_channels = spec_channels + self.inter_channels = inter_channels + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.padding_idx = padding_idx + self.resblock = resblock + self.resblock_kernel_sizes = resblock_kernel_sizes + self.resblock_dilation_sizes = resblock_dilation_sizes + self.upsample_rates = upsample_rates + self.upsample_initial_channel = upsample_initial_channel + self.upsample_kernel_sizes = upsample_kernel_sizes + self.segment_size = segment_size + self.n_speakers = n_speakers + self.gin_channels = gin_channels + + self.use_sdp = use_sdp + + self.enc_p = TextEncoder( + n_vocab, + inter_channels, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size, + p_dropout, + padding_idx, + ) + self.dec = Generator( + inter_channels, + resblock, + resblock_kernel_sizes, + resblock_dilation_sizes, + upsample_rates, + upsample_initial_channel, + upsample_kernel_sizes, + gin_channels=gin_channels, + ) + self.enc_q = PosteriorEncoder( + spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels + ) + self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) + + if use_sdp: + self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) + else: + self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) + + if n_speakers > 1: + self.emb_g = nn.Embedding(n_speakers, gin_channels) + + def forward(self, text, text_len, spec, spec_len, sid=None): + x, mean_prior, logscale_prior, x_mask = self.enc_p(text, text_len) + if self.n_speakers > 
1: + g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] + else: + g = None + + z, mean_posterior, logscale_posterior, y_mask = self.enc_q(spec, spec_len, g=g) + z_p = self.flow(z, y_mask, g=g) + + with torch.no_grad(): + # negative cross-entropy + s_p_sq_r = torch.exp(-2 * logscale_prior) # [b, d, t] + neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logscale_prior, [1], keepdim=True) # [b, 1, t_s] + neg_cent2 = torch.matmul( + -0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r + ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] + neg_cent3 = torch.matmul( + z_p.transpose(1, 2), (mean_prior * s_p_sq_r) + ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] + neg_cent4 = torch.sum(-0.5 * (mean_prior ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] + neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 + + attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) + attn = maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() + + w = attn.sum(2) + if self.use_sdp: + l_length = self.dp(x, x_mask, w, g=g) + l_length = l_length / torch.sum(x_mask) + else: + logw_ = torch.log(w + 1e-6) * x_mask + logw = self.dp(x, x_mask, g=g) + l_length = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(x_mask) # for averaging + + # expand prior + mean_prior = torch.matmul(attn.squeeze(1), mean_prior.transpose(1, 2)).transpose( + 1, 2 + ) # [b, t', t], [b, t, d] -> [b, d, t'] + logscale_prior = torch.matmul(attn.squeeze(1), logscale_prior.transpose(1, 2)).transpose( + 1, 2 + ) # [b, t', t], [b, t, d] -> [b, d, t'] + + z_slice, ids_slice = rand_slice_segments(z, spec_len, self.segment_size) + audio = self.dec(z_slice, g=g) + return ( + audio, + l_length, + attn, + ids_slice, + x_mask, + y_mask, + (z, z_p, mean_prior, logscale_prior, mean_posterior, logscale_posterior), + ) + + def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1.0, max_len=None): + x, mean_prior, logscale_prior, x_mask = self.enc_p(x, x_lengths) + if self.n_speakers > 1 and sid is not None: + g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] + else: + g = None + + if self.use_sdp: + logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) + else: + logw = self.dp(x, x_mask, g=g) + w = torch.exp(logw) * x_mask * length_scale + w_ceil = torch.ceil(w) + audio_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() + audio_mask = torch.unsqueeze(get_mask_from_lengths(audio_lengths, None), 1).to(x_mask.dtype) + attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(audio_mask, -1) + attn = generate_path(w_ceil, attn_mask) + + mean_prior = torch.matmul(attn.squeeze(1), mean_prior.transpose(1, 2)).transpose( + 1, 2 + ) # [b, t', t], [b, t, d] -> [b, d, t'] + logscale_prior = torch.matmul(attn.squeeze(1), logscale_prior.transpose(1, 2)).transpose( + 1, 2 + ) # [b, t', t], [b, t, d] -> [b, d, t'] + + z_p = mean_prior + torch.randn_like(mean_prior) * torch.exp(logscale_prior) * noise_scale + z = self.flow(z_p, audio_mask, g=g, reverse=True) + audio = self.dec((z * audio_mask)[:, :, :max_len], g=g) + return audio, attn, audio_mask, (z, z_p, mean_prior, logscale_prior) + + # Can be used for emotions + def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): + assert self.n_speakers > 1, "n_speakers have to be larger than 1." 
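+        # Usage sketch (added, illustrative only): with a trained model, a
+        # source-speaker spectrogram `y` of lengths `y_lengths`, and speaker id
+        # tensors `sid_src` / `sid_tgt`, a conversion call looks like:
+        #     o_hat, y_mask, _ = model.voice_conversion(y, y_lengths, sid_src, sid_tgt)
+        # The posterior is encoded with the source speaker embedding, mapped to
+        # the speaker-independent prior by the flow, then inverted with the
+        # target embedding before decoding.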
+ g_src = self.emb_g(sid_src).unsqueeze(-1) + g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) + z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) + z_p = self.flow(z, y_mask, g=g_src) + z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) + o_hat = self.dec(z_hat * y_mask, g=g_tgt) + return o_hat, y_mask, (z, z_p, z_hat) + + +############## +# Attentions # +############## +class Encoder(nn.Module): + def __init__( + self, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size=1, + p_dropout=0.0, + window_size=4, + **kwargs + ): + super().__init__() + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.window_size = window_size + + self.drop = nn.Dropout(p_dropout) + self.attn_layers = nn.ModuleList() + self.norm_layers_1 = nn.ModuleList() + self.ffn_layers = nn.ModuleList() + self.norm_layers_2 = nn.ModuleList() + for _ in range(self.n_layers): + self.attn_layers.append( + MultiHeadAttention( + hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size + ) + ) + self.norm_layers_1.append(LayerNorm(hidden_channels)) + self.ffn_layers.append( + FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout) + ) + self.norm_layers_2.append(LayerNorm(hidden_channels)) + + def forward(self, x, x_mask): + attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) + x = x * x_mask + for i in range(self.n_layers): + y = self.attn_layers[i](x, x, attn_mask) + y = self.drop(y) + x = self.norm_layers_1[i](x + y) + y = self.ffn_layers[i](x, x_mask) + y = self.drop(y) + x = self.norm_layers_2[i](x + y) + x = x * x_mask + return x + + +class Decoder(nn.Module): + def __init__( + self, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size=1, + p_dropout=0.0, + proximal_bias=False, + proximal_init=True, + **kwargs + ): + super().__init__() + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.proximal_bias = proximal_bias + self.proximal_init = proximal_init + + self.drop = nn.Dropout(p_dropout) + self.self_attn_layers = nn.ModuleList() + self.norm_layers_0 = nn.ModuleList() + self.encdec_attn_layers = nn.ModuleList() + self.norm_layers_1 = nn.ModuleList() + self.ffn_layers = nn.ModuleList() + self.norm_layers_2 = nn.ModuleList() + for i in range(self.n_layers): + self.self_attn_layers.append( + MultiHeadAttention( + hidden_channels, + hidden_channels, + n_heads, + p_dropout=p_dropout, + proximal_bias=proximal_bias, + proximal_init=proximal_init, + ) + ) + self.norm_layers_0.append(LayerNorm(hidden_channels)) + self.encdec_attn_layers.append( + MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout) + ) + self.norm_layers_1.append(LayerNorm(hidden_channels)) + self.ffn_layers.append( + FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True) + ) + self.norm_layers_2.append(LayerNorm(hidden_channels)) + + def forward(self, x, x_mask, h, h_mask): + """ + x: decoder input + h: encoder output + """ + self_attn_mask = ( + torch.tril(torch.ones(x_mask.size(2), x_mask.size(2))) + .unsqueeze(0) + .unsqueeze(0) + .to(device=x.device, dtype=x.dtype) + ) + encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) + x = x * x_mask + for i in 
range(self.n_layers): + y = self.self_attn_layers[i](x, x, self_attn_mask) + y = self.drop(y) + x = self.norm_layers_0[i](x + y) + + y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) + y = self.drop(y) + x = self.norm_layers_1[i](x + y) + + y = self.ffn_layers[i](x, x_mask) + y = self.drop(y) + x = self.norm_layers_2[i](x + y) + x = x * x_mask + return x + + +class MultiHeadAttention(nn.Module): + def __init__( + self, + channels, + out_channels, + n_heads, + p_dropout=0.0, + window_size=None, + heads_share=True, + block_length=None, + proximal_bias=False, + proximal_init=False, + ): + super().__init__() + assert channels % n_heads == 0 + + self.channels = channels + self.out_channels = out_channels + self.n_heads = n_heads + self.p_dropout = p_dropout + self.window_size = window_size + self.heads_share = heads_share + self.block_length = block_length + self.proximal_bias = proximal_bias + self.proximal_init = proximal_init + self.attn = None + + self.k_channels = channels // n_heads + self.conv_q = nn.Conv1d(channels, channels, 1) + self.conv_k = nn.Conv1d(channels, channels, 1) + self.conv_v = nn.Conv1d(channels, channels, 1) + self.conv_o = nn.Conv1d(channels, out_channels, 1) + self.drop = nn.Dropout(p_dropout) + + if window_size is not None: + n_heads_rel = 1 if heads_share else n_heads + rel_stddev = self.k_channels ** -0.5 + self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) + self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev) + + nn.init.xavier_uniform_(self.conv_q.weight) + nn.init.xavier_uniform_(self.conv_k.weight) + nn.init.xavier_uniform_(self.conv_v.weight) + if proximal_init: + with torch.no_grad(): + self.conv_k.weight.copy_(self.conv_q.weight) + self.conv_k.bias.copy_(self.conv_q.bias) + + def forward(self, x, c, attn_mask=None): + q = self.conv_q(x) + k = self.conv_k(c) + v = self.conv_v(c) + + x, self.attn = self.attention(q, k, v, mask=attn_mask) + + x = self.conv_o(x) + return x + + def attention(self, query, key, value, mask=None): + # reshape [b, d, t] -> [b, n_h, t, d_k] + b, d, t_s, t_t = key.size(0), key.size(1), key.size(2), query.size(2) + query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3) + key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) + value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3) + + scores = torch.matmul(query / math.sqrt(self.k_channels), key.transpose(-2, -1)) + if self.window_size is not None: + assert t_s == t_t, "Relative attention is only available for self-attention." + key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) + rel_logits = self._matmul_with_relative_keys(query / math.sqrt(self.k_channels), key_relative_embeddings) + scores_local = self._relative_position_to_absolute_position(rel_logits) + scores = scores + scores_local + if self.proximal_bias: + assert t_s == t_t, "Proximal bias is only available for self-attention." + scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) + if mask is not None: + scores = scores.masked_fill(mask == 0, -1e4) + if self.block_length is not None: + assert t_s == t_t, "Local attention is only available for self-attention." 
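+            # Added note: `triu(-k).tril(k)` below keeps only entries with
+            # |i - j| <= block_length, i.e. a diagonal band. For t_s = 4 and
+            # block_length = 1 the kept positions are:
+            #     1 1 0 0
+            #     1 1 1 0
+            #     0 1 1 1
+            #     0 0 1 1
+            # everything outside the band is filled with -1e4 before the softmax.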
+ block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length) + scores = scores.masked_fill(block_mask == 0, -1e4) + p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s] + p_attn = self.drop(p_attn) + output = torch.matmul(p_attn, value) + if self.window_size is not None: + relative_weights = self._absolute_position_to_relative_position(p_attn) + value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) + output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) + output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t] + return output, p_attn + + def _matmul_with_relative_values(self, x, y): + """ + x: [b, h, l, m] + y: [h or 1, m, d] + ret: [b, h, l, d] + """ + ret = torch.matmul(x, y.unsqueeze(0)) + return ret + + def _matmul_with_relative_keys(self, x, y): + """ + x: [b, h, l, d] + y: [h or 1, m, d] + ret: [b, h, l, m] + """ + ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1)) + return ret + + def _get_relative_embeddings(self, relative_embeddings, length): + # Pad first before slice to avoid using cond ops. + pad_length = max(length - (self.window_size + 1), 0) + slice_start_position = max((self.window_size + 1) - length, 0) + slice_end_position = slice_start_position + 2 * length - 1 + if pad_length > 0: + padded_relative_embeddings = F.pad( + relative_embeddings, convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]) + ) + else: + padded_relative_embeddings = relative_embeddings + used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position] + return used_relative_embeddings + + def _relative_position_to_absolute_position(self, x): + """ + x: [b, h, l, 2*l-1] + ret: [b, h, l, l] + """ + batch, heads, length, _ = x.size() + # Concat columns of pad to shift from relative to absolute indexing. + x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]])) + + # Concat extra elements so to add up to shape (len+1, 2*len-1). + x_flat = x.view([batch, heads, length * 2 * length]) + x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [0, length - 1]])) + + # Reshape and slice out the padded elements. + x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1 :] + return x_final + + def _absolute_position_to_relative_position(self, x): + """ + x: [b, h, l, l] + ret: [b, h, l, 2*l-1] + """ + batch, heads, length, _ = x.size() + # padd along column + x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]])) + x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)]) + # add 0's in the beginning that will skew the elements after reshape + x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]])) + x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:] + return x_final + + def _attention_bias_proximal(self, length): + """Bias for self-attention to encourage attention to close positions. + Args: + length: an integer scalar. 
+ Returns: + a Tensor with shape [1, 1, length, length] + """ + r = torch.arange(length, dtype=torch.float32) + diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) + return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) + + +class FFN(nn.Module): + def __init__( + self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0.0, activation=None, causal=False + ): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + self.activation = activation + self.causal = causal + + if causal: + self.padding = self._causal_padding + else: + self.padding = self._same_padding + + self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size) + self.conv_2 = nn.Conv1d(filter_channels, out_channels, kernel_size) + self.drop = nn.Dropout(p_dropout) + + def forward(self, x, x_mask): + x = self.conv_1(self.padding(x * x_mask)) + if self.activation == "gelu": + x = x * torch.sigmoid(1.702 * x) + else: + x = torch.relu(x) + x = self.drop(x) + x = self.conv_2(self.padding(x * x_mask)) + return x * x_mask + + def _causal_padding(self, x): + if self.kernel_size == 1: + return x + pad_l = self.kernel_size - 1 + pad_r = 0 + padding = [[0, 0], [0, 0], [pad_l, pad_r]] + x = F.pad(x, convert_pad_shape(padding)) + return x + + def _same_padding(self, x): + if self.kernel_size == 1: + return x + pad_l = (self.kernel_size - 1) // 2 + pad_r = self.kernel_size // 2 + padding = [[0, 0], [0, 0], [pad_l, pad_r]] + x = F.pad(x, convert_pad_shape(padding)) + return x diff --git a/tutorials/nlp/Question_Answering_Squad.ipynb b/tutorials/nlp/Question_Answering_Squad.ipynb new file mode 100755 index 000000000000..532e82f9c216 --- /dev/null +++ b/tutorials/nlp/Question_Answering_Squad.ipynb @@ -0,0 +1,725 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "uRLPr0TnIAHO" + }, + "outputs": [], + "source": [ + "BRANCH = 'main'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "o_0K1lsW1dj9" + }, + "outputs": [], + "source": [ + "\"\"\"\n", + "You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.\n", + "\n", + "Instructions for setting up Colab are as follows:\n", + "1. Open a new Python 3 notebook.\n", + "2. Import this notebook from GitHub (File -> Upload Notebook -> \"GITHUB\" tab -> copy/paste GitHub URL)\n", + "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", + "4. 
Run this cell to set up dependencies.\n", + "\"\"\"\n", + "# If you're using Google Colab and not running locally, run this cell\n", + "\n", + "# install NeMo\n", + "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "dzqD2WDFOIN-" + }, + "outputs": [], + "source": [ + "from nemo.utils.exp_manager import exp_manager\n", + "from nemo.collections import nlp as nemo_nlp\n", + "\n", + "import os\n", + "import wget \n", + "import torch\n", + "import pytorch_lightning as pl\n", + "from omegaconf import OmegaConf" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "daYw_Xll2ZR9" + }, + "source": [ + "# Task Description\n", + "Given a question and a context both in natural language, predict the span within the context with a start and end position which indicates the answer to the question.\n", + "For every word in our training dataset we’re going to predict:\n", + "- likelihood this word is the start of the span \n", + "- likelihood this word is the end of the span \n", + "\n", + "We are using a pretrained [BERT](https://arxiv.org/pdf/1810.04805.pdf) encoder with 2 span prediction heads for prediction start and end position of the answer. The span predictions are token classifiers consisting of a single linear layer. " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ZnuziSwJ1yEB" + }, + "source": [ + "# Dataset\n", + "This model expects the dataset to be in [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/) format, e.g. a JSON file for each dataset split. \n", + "In the following we will show example for a training file. Each title has one or multiple paragraph entries, each consisting of the text - \"context\", and question-answer entries. Each question-answer entry has:\n", + "* a question\n", + "* a globally unique id\n", + "* a boolean flag \"is_impossible\" which shows if the question is answerable or not\n", + "* in case the question is answerable one answer entry, which contains the text span and its starting character index in the context. If not answerable, the \"answers\" list is empty\n", + "\n", + "The evaluation files (for validation and testing) follow the above format except for it can provide more than one answer to the same question. \n", + "The inference file follows the above format except for it does not require the \"answers\" and \"is_impossible\" keywords.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "TXFORGBv2Jqu" + }, + "source": [ + "\n", + "\n", + "```\n", + "{\n", + " \"data\": [\n", + " {\n", + " \"title\": \"Super_Bowl_50\", \n", + " \"paragraphs\": [\n", + " {\n", + " \"context\": \"Super Bowl 50 was an American football game to determine the champion of the National Football League (NFL) for the 2015 season. The American Football Conference (AFC) champion Denver Broncos defeated the National Football Conference (NFC) champion Carolina Panthers 24\\u201310 to earn their third Super Bowl title. The game was played on February 7, 2016, at Levi's Stadium in the San Francisco Bay Area at Santa Clara, California. 
As this was the 50th Super Bowl, the league emphasized the \\\"golden anniversary\\\" with various gold-themed initiatives, as well as temporarily suspending the tradition of naming each Super Bowl game with Roman numerals (under which the game would have been known as \\\"Super Bowl L\\\"), so that the logo could prominently feature the Arabic numerals 50.\", \n", + " \"qas\": [\n", + " {\n", + " \"question\": \"Where did Super Bowl 50 take place?\", \n", + " \"is_impossible\": \"false\", \n", + " \"id\": \"56be4db0acb8001400a502ee\", \n", + " \"answers\": [\n", + " {\n", + " \"answer_start\": \"403\", \n", + " \"text\": \"Santa Clara, California\"\n", + " }\n", + " ]\n", + " },\n", + " {\n", + " \"question\": \"What was the winning score of the Super Bowl 50?\", \n", + " \"is_impossible\": \"true\", \n", + " \"id\": \"56be4db0acb8001400a502ez\", \n", + " \"answers\": [\n", + " ]\n", + " }\n", + " ]\n", + " }\n", + " ]\n", + " }\n", + " ]\n", + "}\n", + "...\n", + "```\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SL58EWkd2ZVb" + }, + "source": [ + "## Download the data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "THi6s1Qx2G1k" + }, + "source": [ + "In this notebook we are going download the [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/) dataset to showcase how to do training and inference. There are two datasets, SQuAD1.0 and SQuAD2.0. SQuAD 1.1, the previous version of the SQuAD dataset, contains 100,000+ question-answer pairs on 500+ articles. SQuAD2.0 dataset combines the 100,000 questions in SQuAD1.1 with over 50,000 unanswerable questions written adversarially by crowdworkers to look similar to answerable ones. \n", + "\n", + "\n", + "To download both datasets, we use [NeMo/examples/nlp/question_answering/get_squad.py](https://github.com/NVIDIA/NeMo/blob/stable/examples/nlp/question_answering/get_squad.py). \n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "tv3qXTTR_hBk" + }, + "outputs": [], + "source": [ + "# set the following paths\n", + "DATA_DIR = \"PATH_TO_DATA\"\n", + "WORK_DIR = \"PATH_TO_CHECKPOINTS_AND_LOGS\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "qcz3Djem_hBn" + }, + "outputs": [], + "source": [ + "## download get_squad.py script to download and preprocess the SQuAD data\n", + "os.makedirs(WORK_DIR, exist_ok=True)\n", + "if not os.path.exists(WORK_DIR + '/get_squad.py'):\n", + " print('Downloading get_squad.py...')\n", + " wget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/examples/nlp/question_answering/get_squad.py', WORK_DIR)\n", + "else:\n", + " print ('get_squad.py already exists')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "mpzsC41t_hBq" + }, + "outputs": [], + "source": [ + "# download and preprocess the data\n", + "! 
python $WORK_DIR/get_squad.py --destDir $DATA_DIR"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "m_HLLl6t_hBs"
+   },
+   "source": [
+    "After executing the above cell, your data folder will contain a subfolder \"squad\" with the following four files for training and evaluation:\n",
+    "- v1.1/train-v1.1.json\n",
+    "- v1.1/dev-v1.1.json\n",
+    "- v2.0/train-v2.0.json\n",
+    "- v2.0/dev-v2.0.json"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "qYHcfxPL_hBt"
+   },
+   "outputs": [],
+   "source": [
+    "! ls -LR {DATA_DIR}/squad"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "bdpikZVreLlI"
+   },
+   "source": [
+    "## Data preprocessing\n",
+    "\n",
+    "The input to the model is the concatenation of two tokenized sequences:\n",
+    "\" [CLS] query [SEP] context [SEP]\".\n",
+    "This is the tokenization used for BERT, i.e. the [WordPiece](https://arxiv.org/pdf/1609.08144.pdf) tokenizer, which uses [Google's BERT vocabulary](https://github.com/google-research/bert). This tokenizer is configured with `model.tokenizer.tokenizer_name=bert-base-uncased` and is automatically instantiated using [Huggingface](https://huggingface.co/)'s API. \n",
+    "The benefit of this tokenizer is that it is compatible with a pretrained BERT model, from which we can finetune instead of training the question answering model from scratch. However, we also support other tokenizers, such as `model.tokenizer.tokenizer_name=sentencepiece`. Unlike the BERT WordPiece tokenizer, the [SentencePiece](https://github.com/google/sentencepiece) tokenizer model first needs to be created from a text file.\n",
+    "See [02_NLP_Tokenizers.ipynb](https://colab.research.google.com/github/NVIDIA/NeMo/blob/stable/tutorials/nlp/02_NLP_Tokenizers.ipynb) for more details on how to use NeMo Tokenizers."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "0q7Y7nyW_hBv"
+   },
+   "source": [
+    "# Data and Model Parameters\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "B0b0Tn8M_hBv"
+   },
+   "source": [
+    "Note that this is only an example to showcase usage; it is not optimized for accuracy. In the following, we will download and adjust the model configuration to create a toy example, where we only use a small fraction of the original dataset. \n",
+    "\n",
+    "In order to train the full SQuAD model, leave the model parameters in the configuration file unchanged; this keeps NUM_SAMPLES=-1, which uses the entire dataset and makes training take much longer. We recommend running the training script with multiple GPUs to accelerate this. \n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "n8HZrDmr12_-"
+   },
+   "outputs": [],
+   "source": [
+    "# This is the model configuration file that we will download, do not change this\n",
+    "MODEL_CONFIG = \"question_answering_squad_config.yaml\"\n",
+    "\n",
+    "# model parameters, play with these\n",
+    "BATCH_SIZE = 12\n",
+    "MAX_SEQ_LENGTH = 384\n",
+    "# specify the BERT-like model you want to use\n",
+    "PRETRAINED_BERT_MODEL = \"bert-base-uncased\"\n",
+    "TOKENIZER_NAME = \"bert-base-uncased\" # tokenizer name\n",
+    "\n",
+    "# Number of data examples used for training, validation, test and inference\n",
+    "TRAIN_NUM_SAMPLES = VAL_NUM_SAMPLES = TEST_NUM_SAMPLES = 5000 \n",
+    "INFER_NUM_SAMPLES = 5\n",
+    "\n",
+    "TRAIN_FILE = f\"{DATA_DIR}/squad/v1.1/train-v1.1.json\"\n",
+    "VAL_FILE = f\"{DATA_DIR}/squad/v1.1/dev-v1.1.json\"\n",
+    "TEST_FILE = f\"{DATA_DIR}/squad/v1.1/dev-v1.1.json\"\n",
+    "INFER_FILE = f\"{DATA_DIR}/squad/v1.1/dev-v1.1.json\"\n",
+    "\n",
+    "INFER_PREDICTION_OUTPUT_FILE = \"output_prediction.json\"\n",
+    "INFER_NBEST_OUTPUT_FILE = \"output_nbest.json\"\n",
+    "\n",
+    "# training parameters\n",
+    "LEARNING_RATE = 0.00003\n",
+    "\n",
+    "# number of epochs\n",
+    "MAX_EPOCHS = 1"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "daludzzL2Jba"
+   },
+   "source": [
+    "# Model Configuration"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "_whKCxfTMo6Y"
+   },
+   "source": [
+    "The model is defined in a config file which declares multiple important sections. They are:\n",
+    "- **model**: All arguments related to the model - language model, span prediction head, optimizer and schedulers, datasets and any other related information\n",
+    "\n",
+    "- **trainer**: Any argument to be passed to PyTorch Lightning"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "T1gA8PsJ13MJ"
+   },
+   "outputs": [],
+   "source": [
+    "# download the model's default configuration file \n",
+    "config_dir = WORK_DIR + '/configs/'\n",
+    "os.makedirs(config_dir, exist_ok=True)\n",
+    "if not os.path.exists(config_dir + MODEL_CONFIG):\n",
+    "    print('Downloading config file...')\n",
+    "    wget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/examples/nlp/question_answering/conf/{MODEL_CONFIG}', config_dir)\n",
+    "else:\n",
+    "    print('config file already exists')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "mX3KmWMvSUQw"
+   },
+   "outputs": [],
+   "source": [
+    "# this line will print the entire default config of the model\n",
+    "config_path = f'{WORK_DIR}/configs/{MODEL_CONFIG}'\n",
+    "print(config_path)\n",
+    "config = OmegaConf.load(config_path)\n",
+    "print(OmegaConf.to_yaml(config))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "ZCgWzNBkaQLZ"
+   },
+   "source": [
+    "## Setting up data within the config\n",
+    "\n",
+    "Among other things, the config file contains dictionaries called dataset, train_ds, validation_ds and test_ds. These are the configurations used to set up the Dataset and DataLoaders of the corresponding split.\n",
+    "\n",
+    "Specify data paths using `model.train_ds.file`, `model.validation_ds.file` and `model.test_ds.file`.\n",
+    "\n",
+    "Let's now add the data paths to the config."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "LQHCJN-ZaoLp" + }, + "outputs": [], + "source": [ + "config.model.train_ds.file = TRAIN_FILE\n", + "config.model.validation_ds.file = VAL_FILE\n", + "config.model.test_ds.file = TEST_FILE\n", + "\n", + "config.model.train_ds.num_samples = TRAIN_NUM_SAMPLES\n", + "config.model.validation_ds.num_samples = VAL_NUM_SAMPLES\n", + "config.model.test_ds.num_samples = TEST_NUM_SAMPLES\n", + "\n", + "config.model.tokenizer.tokenizer_name = TOKENIZER_NAME" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nB96-3sTc3yk" + }, + "source": [ + "# Building the PyTorch Lightning Trainer\n", + "\n", + "NeMo models are primarily PyTorch Lightning modules - and therefore are entirely compatible with the PyTorch Lightning ecosystem!\n", + "\n", + "Let's first instantiate a Trainer object!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "knF6QeQQdMrH" + }, + "outputs": [], + "source": [ + "# lets modify some trainer configs\n", + "# checks if we have GPU available and uses it\n", + "accelerator = 'gpu' if torch.cuda.is_available() else 'cpu'\n", + "config.trainer.devices = 1\n", + "config.trainer.accelerator = accelerator\n", + "config.trainer.precision = 16 if torch.cuda.is_available() else 32\n", + "\n", + "# For mixed precision training, use precision=16 and amp_level=O1\n", + "\n", + "config.trainer.max_epochs = MAX_EPOCHS\n", + "\n", + "# Remove distributed training flags if only running on a single GPU or CPU\n", + "config.trainer.strategy = None\n", + "\n", + "print(\"Trainer config - \\n\")\n", + "print(OmegaConf.to_yaml(config.trainer))\n", + "\n", + "trainer = pl.Trainer(**config.trainer)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8IlEMdVxdr6p" + }, + "source": [ + "# Setting up a NeMo Experiment¶\n", + "\n", + "NeMo has an experiment manager that handles logging and checkpointing for us, so let's use it!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "8uztqGAmdrYt" + }, + "outputs": [], + "source": [ + "config.exp_manager.exp_dir = WORK_DIR\n", + "exp_dir = exp_manager(trainer, config.get(\"exp_manager\", None))\n", + "\n", + "# the exp_dir provides a path to the current experiment for easy access\n", + "exp_dir = str(exp_dir)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "D4jy28fbjekD" + }, + "source": [ + "# Using an Out-Of-Box Model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Ins2ZzJckKKo" + }, + "outputs": [], + "source": [ + "# list available pretrained models\n", + "nemo_nlp.models.QAModel.list_available_models()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "iFnzHvkVk-S5" + }, + "outputs": [], + "source": [ + "# load pretained model\n", + "pretrained_model_name=\"qa_squadv1.1_bertbase\"\n", + "model = nemo_nlp.models.QAModel.from_pretrained(model_name=pretrained_model_name)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6FI_nQsJo_11" + }, + "source": [ + "# Model Training" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8tjLhUvL_o7_" + }, + "source": [ + "Before initializing the model, we might want to modify some of the model configs." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Xeuc2i7Y_nP5" + }, + "outputs": [], + "source": [ + "# complete list of supported BERT-like models\n", + "nemo_nlp.modules.get_pretrained_lm_models_list()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "RK2xglXyAUOO" + }, + "outputs": [], + "source": [ + "# add the specified above model parameters to the config\n", + "config.model.language_model.pretrained_model_name = PRETRAINED_BERT_MODEL\n", + "config.model.train_ds.batch_size = BATCH_SIZE\n", + "config.model.validation_ds.batch_size = BATCH_SIZE\n", + "config.model.test_ds.batch_size = BATCH_SIZE\n", + "config.model.optim.lr = LEARNING_RATE\n", + "\n", + "print(\"Updated model config - \\n\")\n", + "print(OmegaConf.to_yaml(config.model))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "NgsGLydWo-6-" + }, + "outputs": [], + "source": [ + "# initialize the model\n", + "# dataset we'll be prepared for training and evaluation during\n", + "model = nemo_nlp.models.QAModel(cfg=config.model, trainer=trainer)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "kQ592Tx4pzyB" + }, + "source": [ + "## Monitoring Training Progress\n", + "Optionally, you can create a Tensorboard visualization to monitor training progress." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "mTJr16_pp0aS" + }, + "outputs": [], + "source": [ + "try:\n", + " from google import colab\n", + " COLAB_ENV = True\n", + "except (ImportError, ModuleNotFoundError):\n", + " COLAB_ENV = False\n", + "\n", + "# Load the TensorBoard notebook extension\n", + "if COLAB_ENV:\n", + " %load_ext tensorboard\n", + " %tensorboard --logdir {exp_dir}\n", + "else:\n", + " print(\"To use tensorboard, please use this notebook in a Google Colab environment.\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "hUvnSpyjp0Dh" + }, + "outputs": [], + "source": [ + "# start the training\n", + "trainer.fit(model)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "JxBiIKMlH8yv" + }, + "source": [ + "After training for 1 epoch, exact match on the evaluation data should be around 59.2%, F1 around 70.2%." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ynCLBmAWFVsM" + }, + "source": [ + "# Evaluation\n", + "\n", + "To see how the model performs, let’s run evaluation on the test dataset." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "XBMCoXAKFtSd" + }, + "outputs": [], + "source": [ + "model.setup_test_data(test_data_config=config.model.test_ds)\n", + "trainer.test(model)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "VPdzJVAgSFaJ" + }, + "source": [ + "# Inference\n", + "\n", + "To use the model for creating predictions, let’s run inference on the unlabeled inference dataset." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "DQhsamclRtxJ" + }, + "outputs": [], + "source": [ + "# # store test prediction under the experiment output folder\n", + "output_prediction_file = f\"{exp_dir}/{INFER_PREDICTION_OUTPUT_FILE}\"\n", + "output_nbest_file = f\"{exp_dir}/{INFER_NBEST_OUTPUT_FILE}\"\n", + "all_preds, all_nbests = model.inference(file=INFER_FILE, batch_size=5, num_samples=INFER_NUM_SAMPLES, output_nbest_file=output_nbest_file, output_prediction_file=output_prediction_file)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "sQpRIOaM_hCQ" + }, + "outputs": [], + "source": [ + "for _, item in all_preds.items():\n", + " print(f\"question: {item[0]} answer: {item[1]}\")\n", + "#The prediction file contains the predicted answer to each question id for the first TEST_NUM_SAMPLES.\n", + "! python -m json.tool $exp_dir/$INFER_PREDICTION_OUTPUT_FILE" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ref1qSonGNhP" + }, + "source": [ + "If you have NeMo installed locally, you can also train the model with \n", + "[NeMo/examples/nlp/question_answering/get_squad.py](https://github.com/NVIDIA/NeMo/blob/stable/examples/nlp/question_answering/question_answering_squad.py).\n", + "\n", + "To run training script, use:\n", + "\n", + "`python question_answering_squad.py model.train_ds.file=TRAIN_FILE model.validation_ds.file=VAL_FILE model.test_ds.file=TEST_FILE`\n", + "\n", + "To improve the performance of the model, train with multi-GPU and a global batch size of 24. So if you use 8 GPUs with `trainer.devices=8`, set `model.train_ds.batch_size=3`" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [ + "daYw_Xll2ZR9" + ], + "name": "Question_Answering_Squad.ipynb", + "private_outputs": true, + "provenance": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.6" + }, + "pycharm": { + "stem_cell": { + "cell_type": "raw", + "metadata": { + "collapsed": false + }, + "source": [] + } + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} From 90775102cb5ac13f17c48c6ed9caba1234206b05 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Tue, 13 Dec 2022 03:41:19 -0800 Subject: [PATCH 219/244] updated version Signed-off-by: Evgeniy Shabalin --- tutorials/00_NeMo_Primer.ipynb | 2 +- tutorials/01_NeMo_Models.ipynb | 516 ++++---- tutorials/02_NeMo_Adapters.ipynb | 8 +- tutorials/AudioTranslationSample.ipynb | 4 +- ...blish_NeMo_Model_On_Hugging_Face_Hub.ipynb | 486 +++---- tutorials/VoiceSwapSample.ipynb | 4 +- .../asr/ASR_CTC_Language_Finetuning.ipynb | 548 ++++---- tutorials/asr/ASR_for_telephony_speech.ipynb | 4 +- tutorials/asr/ASR_with_NeMo.ipynb | 220 ++-- .../asr/ASR_with_Subword_Tokenization.ipynb | 1164 ++++++++--------- tutorials/asr/ASR_with_Transducers.ipynb | 2 +- .../asr/Buffered_Transducer_Inference.ipynb | 2 +- ..._Transducer_Inference_with_LCS_Merge.ipynb | 528 ++++---- tutorials/asr/Intro_to_Transducers.ipynb | 2 +- tutorials/asr/Multilang_ASR.ipynb | 2 +- tutorials/asr/Offline_ASR.ipynb | 2 +- .../Offline_ASR_with_VAD_for_CTC_models.ipynb | 2 +- .../asr/Online_ASR_Microphone_Demo.ipynb | 2 +- 
tutorials/asr/Online_Noise_Augmentation.ipynb | 2 +- .../Online_Offline_Microphone_VAD_Demo.ipynb | 2 +- .../Online_Offline_Speech_Commands_Demo.ipynb | 2 +- .../asr/Self_Supervised_Pre_Training.ipynb | 2 +- tutorials/asr/Speech_Commands.ipynb | 2 +- tutorials/asr/Streaming_ASR.ipynb | 2 +- tutorials/asr/Voice_Activity_Detection.ipynb | 2 +- .../asr/asr_adapters/ASR_with_Adapters.ipynb | 2 +- ...Language_Models_for_Downstream_Tasks.ipynb | 2 +- tutorials/nlp/02_NLP_Tokenizers.ipynb | 4 +- ...a_Preprocessing_and_Cleaning_for_NMT.ipynb | 2 +- tutorials/nlp/Dialogue.ipynb | 2 +- tutorials/nlp/Entity_Linking_Medical.ipynb | 2 +- tutorials/nlp/GLUE_Benchmark.ipynb | 2 +- ...Joint_Intent_and_Slot_Classification.ipynb | 2 +- ...on_Synthetic_Tabular_Data_Generation.ipynb | 2 +- .../nlp/Punctuation_and_Capitalization.ipynb | 2 +- ...ion_and_Capitalization_Lexical_Audio.ipynb | 2 +- tutorials/nlp/Question_Answering.ipynb | 2 +- .../nlp/Relation_Extraction-BioMegatron.ipynb | 2 +- tutorials/nlp/Text2Sparql.ipynb | 2 +- ...xt_Classification_Sentiment_Analysis.ipynb | 2 +- ...ssification_Named_Entity_Recognition.ipynb | 4 +- .../nlp/Zero_Shot_Intent_Recognition.ipynb | 4 +- .../ASR_with_SpeakerDiarization.ipynb | 2 +- .../Speaker_Diarization_Inference.ipynb | 2 +- .../Speaker_Diarization_Training.ipynb | 2 +- .../Speaker_Identification_Verification.ipynb | 2 +- .../ITN_with_Thutmose_Tagger.ipynb | 2 +- .../Text_(Inverse)_Normalization.ipynb | 2 +- tutorials/text_processing/WFST_Tutorial.ipynb | 2 +- .../tools/CTC_Segmentation_Tutorial.ipynb | 2 +- tutorials/tools/Multispeaker_Simulator.ipynb | 2 +- .../tts/Aligner_Inference_Examples.ipynb | 2 +- tutorials/tts/FastPitch_Finetuning.ipynb | 2 +- .../tts/FastPitch_GermanTTS_Training.ipynb | 2 +- .../tts/FastPitch_MixerTTS_Training.ipynb | 2 +- .../tts/FastPitch_Speaker_Interpolation.ipynb | 2 +- .../tts/Inference_DurationPitchControl.ipynb | 2 +- tutorials/tts/Inference_ModelSelect.ipynb | 2 +- tutorials/tts/NeMo_TTS_Primer.ipynb | 2 +- tutorials/tts/Tacotron2_Training.ipynb | 2 +- 60 files changed, 1792 insertions(+), 1796 deletions(-) diff --git a/tutorials/00_NeMo_Primer.ipynb b/tutorials/00_NeMo_Primer.ipynb index aac1ee3b72c6..5e5dcbb92c1e 100644 --- a/tutorials/00_NeMo_Primer.ipynb +++ b/tutorials/00_NeMo_Primer.ipynb @@ -42,7 +42,7 @@ "!pip install text-unidecode\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "## Install TorchAudio\n", diff --git a/tutorials/01_NeMo_Models.ipynb b/tutorials/01_NeMo_Models.ipynb index c537f2c86855..df4491ff15f9 100644 --- a/tutorials/01_NeMo_Models.ipynb +++ b/tutorials/01_NeMo_Models.ipynb @@ -1,24 +1,12 @@ { - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "name": "01_NeMo_Models.ipynb", - "provenance": [], - "collapsed_sections": [], - "toc_visible": true - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - } - }, "cells": [ { "cell_type": "code", + "execution_count": null, "metadata": { "id": "ASnx4b5jXsil" }, + "outputs": [], "source": [ "\"\"\"\n", "You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.\n", @@ -37,7 +25,7 @@ "!pip install text-unidecode\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "## Install TorchAudio\n", @@ -45,9 +33,7 @@ 
"\n", "## Grab the config we'll use in this example\n", "!mkdir configs" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -174,17 +160,17 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "piLOgwOPX1FS" }, + "outputs": [], "source": [ "import torch\n", "import nemo\n", "from nemo.core import NeuralModule\n", "from nemo.core import typecheck" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -208,29 +194,29 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "bseLiNoqqQrE" }, + "outputs": [], "source": [ "class MyEmptyModule(NeuralModule):\n", "\n", " def forward(self):\n", " print(\"Neural Module ~ hello world!\")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "j4Q36L5urdOQ" }, + "outputs": [], "source": [ "x = MyEmptyModule()\n", "x()" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -261,33 +247,33 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "ZvC57bbxwXxN" }, + "outputs": [], "source": [ "# Case 1:\n", "embedding = torch.nn.Embedding(num_embeddings=10, embedding_dim=30)\n", "x = torch.randint(high=10, size=(1, 5))\n", "print(\"x :\", x)\n", "print(\"embedding(x) :\", embedding(x).shape)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "sMaqhMBgxe2C" }, + "outputs": [], "source": [ "# Case 2\n", "lstm = torch.nn.LSTM(1, 30, batch_first=True)\n", "x = torch.randn(1, 5, 1)\n", "print(\"x :\", x)\n", "print(\"lstm(x) :\", lstm(x)[0].shape) # Let's take all timestep outputs of the LSTM" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -340,21 +326,23 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "yp0FG8NJt1Jd" }, + "outputs": [], "source": [ "from nemo.core.neural_types import NeuralType\n", "from nemo.core.neural_types import *" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "3tsgs8Fp0-WV" }, + "outputs": [], "source": [ "class EmbeddingModule(NeuralModule):\n", " def __init__(self):\n", @@ -376,9 +364,7 @@ " return {\n", " 'y': NeuralType(axes=('B', 'T', 'C'), elements_type=EmbeddedTextType())\n", " }" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -442,14 +428,14 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "boxxMniv27vi" }, + "outputs": [], "source": [ "embedding_module = EmbeddingModule()" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -462,9 +448,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "SZZOOoCJ2-iV" }, + "outputs": [], "source": [ "class LSTMModule(NeuralModule):\n", " def __init__(self):\n", @@ -486,9 +474,7 @@ " return {\n", " 'y': NeuralType(axes=('B', 'T', 'C'), elements_type=EncodedRepresentation())\n", " }" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -506,14 +492,14 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "6LlOJf0C8GN4" }, + "outputs": [], "source": [ "lstm_module = LSTMModule()" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -527,17 +513,17 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "giLJlub78-Ja" }, + 
"outputs": [], "source": [ "# Case 1 [ERROR CELL]\n", "x1 = torch.randint(high=10, size=(1, 5))\n", "print(\"x :\", x1)\n", "print(\"embedding(x) :\", embedding_module(x1).shape)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -553,16 +539,16 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "2KUj_p6M9L-f" }, + "outputs": [], "source": [ "# Case 1\n", "print(\"x :\", x1)\n", "print(\"embedding(x) :\", embedding_module(x=x1).shape)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -575,17 +561,17 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "FMu3B0-9-CqE" }, + "outputs": [], "source": [ "# Case 2 [ERROR CELL]\n", "x2 = torch.randn(1, 5, 1) # Input = [B=1, T=5, C=1]\n", "print(\"x :\", x2)\n", "print(\"lstm(x) :\", lstm_module(x=x2)[0].shape) # Let's take all timestep outputs of the LSTM" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -611,9 +597,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "q2u-keAM-d-B" }, + "outputs": [], "source": [ "class CorrectLSTMModule(LSTMModule): # Let's inherit the wrong class to make it easy to override\n", " @property\n", @@ -622,9 +610,7 @@ " 'y': NeuralType(axes=('B', 'T', 'C'), elements_type=EncodedRepresentation()),\n", " 'h_c': [NeuralType(axes=('D', 'B', 'C'), elements_type=EncodedRepresentation())],\n", " }" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -641,20 +627,22 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "GyPZH-fz_dG4" }, + "outputs": [], "source": [ "lstm_module = CorrectLSTMModule()" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "9whH50PE_Xyx" }, + "outputs": [], "source": [ "# Case 2\n", "x2 = torch.randn(1, 5, 1)\n", @@ -663,9 +651,7 @@ "print(\"lstm(x) :\", y2.shape) # The output of the LSTM RNN\n", "print(\"hidden state (h) :\", h.shape) # The first hidden state of the LSTM RNN\n", "print(\"hidden state (c) :\", c.shape) # The second hidden state of the LSTM RNN" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -683,30 +669,30 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "bGQ9XbWU_ffa" }, + "outputs": [], "source": [ "emb_out = embedding_module(x=x1)\n", "lstm_out = lstm_module(x=x2)[0]\n", "\n", "assert hasattr(emb_out, 'neural_type')\n", "assert hasattr(lstm_out, 'neural_type')" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "kEpBruSOScPJ" }, + "outputs": [], "source": [ "print(\"Embedding tensor :\", emb_out.neural_type)\n", "print(\"LSTM tensor :\", lstm_out.neural_type)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -724,25 +710,25 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "8AU9FMtdATIm" }, + "outputs": [], "source": [ "emb_out.neural_type.compare(lstm_out.neural_type)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "2cqnqAGIBCjA" }, + "outputs": [], "source": [ "emb_out.neural_type == lstm_out.neural_type" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -775,9 +761,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "AGbKB4gJEzcU" }, + 
"outputs": [], "source": [ "embedding_module = EmbeddingModule()\n", "x1 = torch.randint(high=10, size=(1, 5))\n", @@ -786,23 +774,21 @@ "x1.neural_type = NeuralType(('B', 'T'), Index())\n", "\n", "print(\"embedding(x) :\", embedding_module(x=x1).shape)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "F0j-evylFM5j" }, + "outputs": [], "source": [ "# Attach wrong neural type [ERROR CELL]\n", "x1.neural_type = NeuralType(('B', 'T'), LabelsType())\n", "\n", "print(\"embedding(x) :\", embedding_module(x=x1).shape)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -819,9 +805,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "raFkuSRaBAE0" }, + "outputs": [], "source": [ "import math\n", "from typing import List, Set, Dict, Tuple, Optional\n", @@ -829,9 +817,7 @@ "import torch\n", "import torch.nn as nn\n", "from torch.nn import functional as F" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -848,9 +834,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "ybhLLVyUF0mo" }, + "outputs": [], "source": [ "class AttentionType(EncodedRepresentation):\n", " \"\"\"Basic Attention Element Type\"\"\"\n", @@ -860,9 +848,7 @@ "\n", "class CausalSelfAttentionType(SelfAttentionType):\n", " \"\"\"Causal Self Attention Element Type\"\"\"" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -879,9 +865,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "w4oXpAL_CoDp" }, + "outputs": [], "source": [ "class CausalSelfAttention(nn.Module):\n", " \"\"\"\n", @@ -946,9 +934,7 @@ " x = x + self.attn(self.ln1(x))\n", " x = x + self.mlp(self.ln2(x))\n", " return x" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -980,16 +966,16 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "0TsfmCYthMux" }, + "outputs": [], "source": [ "import pytorch_lightning as ptl\n", "from nemo.core import ModelPT\n", "from omegaconf import OmegaConf" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1005,9 +991,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "98x9-Fh-HVwj" }, + "outputs": [], "source": [ "class PTLGPT(ptl.LightningModule):\n", " def __init__(self,\n", @@ -1077,9 +1065,7 @@ " elif isinstance(module, nn.LayerNorm):\n", " module.bias.data.zero_()\n", " module.weight.data.fill_(1.0)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1093,14 +1079,14 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "rrXIBzg4wutC" }, + "outputs": [], "source": [ "m = PTLGPT(vocab_size=100, block_size=32, n_layer=1, n_embd=32, n_head=4)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1152,9 +1138,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "uYwMyjqK05RL" }, + "outputs": [], "source": [ "class GPTEmbedding(NeuralModule):\n", " def __init__(self, vocab_size: int, n_embd: int, block_size: int, embd_pdrop: float = 0.0):\n", @@ -1186,9 +1174,7 @@ " return {\n", " 'embeddings': NeuralType(('B', 'T', 'C'), EmbeddedTextType())\n", " }" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1217,9 +1203,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "1QeQnQ_G2PwH" }, + 
"outputs": [], "source": [ "class GPTTransformerEncoder(NeuralModule):\n", " def __init__(self, n_embd: int, block_size: int, n_head: int, n_layer: int, attn_pdrop: float = 0.0, resid_pdrop: float = 0.0):\n", @@ -1243,9 +1231,7 @@ " return {\n", " 'encoding': NeuralType(('B', 'T', 'C'), CausalSelfAttentionType())\n", " }" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1268,9 +1254,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "VCPUu0EWQIBX" }, + "outputs": [], "source": [ "class GPTDecoder(NeuralModule):\n", " def __init__(self, n_embd: int, vocab_size: int):\n", @@ -1295,9 +1283,7 @@ " return {\n", " 'logits': NeuralType(('B', 'T', 'C'), LogitsType())\n", " }\n" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1314,9 +1300,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "ZQlmtYU6iDwi" }, + "outputs": [], "source": [ "class AbstractNeMoGPT(ModelPT):\n", " def __init__(self, cfg: OmegaConf, trainer: ptl.Trainer = None):\n", @@ -1375,9 +1363,7 @@ " return {\n", " 'logits': NeuralType(('B', 'T', 'C'), LogitsType())\n", " }" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1396,9 +1382,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "uygo0BEYjKuj" }, + "outputs": [], "source": [ "# model definition args (required)\n", "# ================================\n", @@ -1413,9 +1401,7 @@ "# embd_pdrop: float = 0.1, # \\in [0,1]: amount of dropout on input embeddings\n", "# resid_pdrop: float = 0.1, # \\in [0,1]: amount of dropout in each residual connection\n", "# attn_pdrop: float = 0.1, # \\in [0,1]: amount of dropout on the attention matrix" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1431,27 +1417,27 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "XqLSZq7Soo2j" }, + "outputs": [], "source": [ "from omegaconf import MISSING" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "JTH-1vu8TO7o" }, + "outputs": [], "source": [ "# Let's create a utility for building the class path\n", "def get_class_path(cls):\n", " return f'{cls.__module__}.{cls.__name__}'" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1466,9 +1452,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "ZCvLdOlMVLy_" }, + "outputs": [], "source": [ "common_config = OmegaConf.create({\n", " 'vocab_size': MISSING,\n", @@ -1477,9 +1465,7 @@ " 'n_embd': MISSING,\n", " 'n_head': MISSING,\n", "})" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1510,9 +1496,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "ntsxQKH0pDac" }, + "outputs": [], "source": [ "embedding_config = OmegaConf.create({\n", " '_target_': get_class_path(GPTEmbedding),\n", @@ -1538,9 +1526,7 @@ " 'n_embd': '${model.n_embd}',\n", " 'vocab_size': '${model.vocab_size}'\n", "})" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1591,9 +1577,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "c8hvNeB_aDgi" }, + "outputs": [], "source": [ "model_config = OmegaConf.create({\n", " 'model': common_config\n", @@ -1603,9 +1591,7 @@ "model_config.model.embedding = embedding_config\n", "model_config.model.encoder = encoder_config\n", 
"model_config.model.decoder = decoder_config" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1619,14 +1605,14 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "2SyKNgp9pG0N" }, + "outputs": [], "source": [ "print(OmegaConf.to_yaml(model_config))" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1642,20 +1628,22 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "0X4C76JyOAnN" }, + "outputs": [], "source": [ "import copy" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "ugxA0TPtbHVZ" }, + "outputs": [], "source": [ "temp_config = copy.deepcopy(model_config)\n", "temp_config.model.vocab_size = 10\n", @@ -1666,9 +1654,7 @@ "\n", "temp_config = OmegaConf.create(OmegaConf.to_container(temp_config, resolve=True))\n", "print(OmegaConf.to_yaml(temp_config))" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1682,21 +1668,23 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "IIIVi2IfpsJ4" }, + "outputs": [], "source": [ "# Let's work on a copy of the model config and update it before we send it into the Model.\n", "cfg = copy.deepcopy(model_config)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "OllBhswPqQXq" }, + "outputs": [], "source": [ "# Let's set the values of the config (for some plausible small model)\n", "cfg.model.vocab_size = 100\n", @@ -1704,32 +1692,30 @@ "cfg.model.n_layer = 1\n", "cfg.model.n_embd = 32\n", "cfg.model.n_head = 4" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "QJm2LnTqqcIM" }, + "outputs": [], "source": [ "print(OmegaConf.to_yaml(cfg))" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "E7tpB8BcqeBO" }, + "outputs": [], "source": [ "# Try to create a model with this config [ERROR CELL]\n", "m = AbstractNeMoGPT(cfg.model)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1759,20 +1745,22 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "Vcwi1lO7t7Sm" }, + "outputs": [], "source": [ "from nemo.core.classes.common import PretrainedModelInfo" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "ckCxyVLYqrz0" }, + "outputs": [], "source": [ "class BasicNeMoGPT(AbstractNeMoGPT):\n", "\n", @@ -1788,9 +1776,7 @@ " \n", " def setup_test_data(self, test_data_config: OmegaConf):\n", " self._test_dl = None" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1804,14 +1790,14 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "G8iYQSC5vptU" }, + "outputs": [], "source": [ "m = BasicNeMoGPT(cfg.model)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1836,9 +1822,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "QU3oQAVovxRg" }, + "outputs": [], "source": [ "class BasicNeMoGPTWithSteps(BasicNeMoGPT):\n", "\n", @@ -1868,20 +1856,18 @@ " def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0):\n", " test_loss_mean = torch.stack([x['test_loss'] for x in outputs]).mean()\n", " return {'test_loss': test_loss_mean}" - ], - 
"execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "2Ki3kRxag511" }, + "outputs": [], "source": [ "m = BasicNeMoGPTWithSteps(cfg=cfg.model)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1932,9 +1918,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "FgXkZQiVjnOv" }, + "outputs": [], "source": [ "class BasicNeMoGPTWithOptim(BasicNeMoGPTWithSteps):\n", "\n", @@ -1983,20 +1971,18 @@ " ]\n", " optimizer = torch.optim.AdamW(optim_groups, lr=self.cfg.optim.lr, betas=self.cfg.optim.betas)\n", " return optimizer\n" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "kARDwthakEQk" }, + "outputs": [], "source": [ "m = BasicNeMoGPTWithOptim(cfg=cfg.model)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -2010,9 +1996,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "5K7zh9Cn2s2u" }, + "outputs": [], "source": [ "OmegaConf.set_struct(cfg.model, False)\n", "\n", @@ -2025,9 +2013,7 @@ "cfg.model.optim = optim_config\n", "\n", "OmegaConf.set_struct(cfg.model, True)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -2066,22 +2052,24 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "E-fswFkig9t4" }, + "outputs": [], "source": [ "from nemo.core import Dataset\n", "from torch.utils import data\n", "from torch.utils.data.dataloader import DataLoader" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "-Z8XuPeClGNm" }, + "outputs": [], "source": [ "class TinyShakespeareDataset(Dataset):\n", "\n", @@ -2136,9 +2124,7 @@ " 'input': NeuralType(('B', 'T'), Index()),\n", " 'target': NeuralType(('B', 'T'), LabelsType())\n", " }" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -2168,50 +2154,50 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "VwsdXtVzo--t" }, + "outputs": [], "source": [ "import os" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "QvKcDCvIl9-A" }, + "outputs": [], "source": [ "if not os.path.exists('tiny-shakespeare.txt'):\n", " !wget https://raw.githubusercontent.com/jcjohnson/torch-rnn/master/data/tiny-shakespeare.txt" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "ynCwqDu6vK8P" }, + "outputs": [], "source": [ "!head -n 5 tiny-shakespeare.txt" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "bfRL4t9_oS4C" }, + "outputs": [], "source": [ "train_dataset = TinyShakespeareDataset('tiny-shakespeare.txt', cfg.model.block_size, crop=(0, int(1e6)))\n", "val_dataset = TinyShakespeareDataset('tiny-shakespeare.txt', cfg.model.block_size, crop=(int(1e6), int(50e3)), override_vocab=train_dataset.vocab)\n", "test_dataset = TinyShakespeareDataset('tiny-shakespeare.txt', cfg.model.block_size, crop=(int(1.05e6), int(100e3)), override_vocab=train_dataset.vocab)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -2230,9 +2216,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "SVSfIk_-rMSg" }, + "outputs": [], "source": [ "class 
NeMoGPT(BasicNeMoGPTWithOptim):\n", "\n", @@ -2270,9 +2258,7 @@ " \n", " def setup_test_data(self, test_data_config: OmegaConf):\n", " self._test_dl = self._setup_data_loader(test_data_config)\n" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -2287,9 +2273,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "C6zcTqJixOOL" }, + "outputs": [], "source": [ "OmegaConf.set_struct(cfg.model, False)\n", "\n", @@ -2298,15 +2286,15 @@ "cfg.model.vocab_size = train_dataset.vocab_size\n", "\n", "OmegaConf.set_struct(cfg.model, True)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "zlvThf7BysyT" }, + "outputs": [], "source": [ "train_ds = OmegaConf.create({\n", " 'data_path': '${model.data_path}',\n", @@ -2331,15 +2319,15 @@ " 'batch_size': 4,\n", " 'shuffle': False,\n", "})" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "QVVzR6WKyMT5" }, + "outputs": [], "source": [ "# Attach to the model config\n", "OmegaConf.set_struct(cfg.model, False)\n", @@ -2349,33 +2337,31 @@ "cfg.model.test_ds = test_ds\n", "\n", "OmegaConf.set_struct(cfg.model, True)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "nd_9_mxS0ET-" }, + "outputs": [], "source": [ "# Let's see the config now !\n", "print(OmegaConf.to_yaml(cfg))" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "dlwSQENU0JxA" }, + "outputs": [], "source": [ "# Let's try creating a model now !\n", "model = NeMoGPT(cfg=cfg.model)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -2410,9 +2396,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "johk6Z0e0WEm" }, + "outputs": [], "source": [ "if torch.cuda.is_available():\n", " accelerator = 'gpu'\n", @@ -2420,20 +2408,18 @@ " accelerator = 'cpu'\n", "\n", "trainer = ptl.Trainer(devices=1, accelerator=accelerator, limit_test_batches=1.0)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "oqeeofEr1S8e" }, + "outputs": [], "source": [ "trainer.test(model)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -2450,48 +2436,48 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "DksG_-7G1Vbe" }, + "outputs": [], "source": [ "model.save_to('gpt_model.nemo')" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "JhjoFdCnBWVh" }, + "outputs": [], "source": [ "!ls -d -- *.nemo" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "567txSF0BYXN" }, + "outputs": [], "source": [ "temp_model = NeMoGPT.restore_from('gpt_model.nemo')" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "YvnfG0kxBfTt" }, + "outputs": [], "source": [ "# [ERROR CELL]\n", "temp_model.setup_test_data(temp_model.cfg.test_ds)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -2510,9 +2496,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "_Atyoc4NBjEV" }, + "outputs": [], "source": [ "class NeMoGPTv2(NeMoGPT):\n", " 
\n", @@ -2552,61 +2540,61 @@ " self.vocab = vocab\n", "\n", " self._test_dl = self._setup_data_loader(test_data_config)\n" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "mn09jsRZDusN" }, + "outputs": [], "source": [ "# Let's try creating a model now !\n", "model = NeMoGPTv2(cfg=cfg.model)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "sQPIPySDD1K0" }, + "outputs": [], "source": [ "# Now let's try to save and restore !\n", "model.save_to('gpt_model.nemo')" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "0YwCJ4xaJ3bU" }, + "outputs": [], "source": [ "temp_model = NeMoGPTv2.restore_from('gpt_model.nemo')" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "tcxwDIIWKKCQ" }, + "outputs": [], "source": [ "temp_model.setup_multiple_test_data(temp_model.cfg.test_ds)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "j3Olm6ZTKRbO" }, + "outputs": [], "source": [ "if torch.cuda.is_available():\n", " accelerator = 'gpu'\n", @@ -2614,20 +2602,18 @@ " accelerator = 'cpu'\n", "\n", "trainer = ptl.Trainer(devices=1, accelerator=accelerator, limit_test_batches =1.0)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "_QE2SngCKV2p" }, + "outputs": [], "source": [ "trainer.test(model)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -2641,14 +2627,26 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "ZjCV5u3_OO7a" }, - "source": [ - "" - ], - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [] + } + ], + "metadata": { + "colab": { + "collapsed_sections": [], + "name": "01_NeMo_Models.ipynb", + "provenance": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" } - ] -} \ No newline at end of file + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/tutorials/02_NeMo_Adapters.ipynb b/tutorials/02_NeMo_Adapters.ipynb index c7c6bd32137e..273c2fa9b7b2 100644 --- a/tutorials/02_NeMo_Adapters.ipynb +++ b/tutorials/02_NeMo_Adapters.ipynb @@ -25,7 +25,7 @@ "!pip install text-unidecode\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "## Grab the config we'll use in this example\n", @@ -1657,9 +1657,7 @@ "id": "iz2wF3cd-6MF" }, "outputs": [], - "source": [ - "" - ] + "source": [] } ], "metadata": { @@ -1678,4 +1676,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/tutorials/AudioTranslationSample.ipynb b/tutorials/AudioTranslationSample.ipynb index f0ab7df20199..524e0d31d1e2 100644 --- a/tutorials/AudioTranslationSample.ipynb +++ b/tutorials/AudioTranslationSample.ipynb @@ -38,7 +38,7 @@ }, "outputs": [], "source": [ - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "# install Pynini for text normalization\n", @@ -284,4 +284,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git 
a/tutorials/Publish_NeMo_Model_On_Hugging_Face_Hub.ipynb b/tutorials/Publish_NeMo_Model_On_Hugging_Face_Hub.ipynb index a13174033e0c..a5b62079e441 100644 --- a/tutorials/Publish_NeMo_Model_On_Hugging_Face_Hub.ipynb +++ b/tutorials/Publish_NeMo_Model_On_Hugging_Face_Hub.ipynb @@ -1,20 +1,4 @@ { - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "name": "Publish_NeMo_Model_On_Hugging_Face_Hub.ipynb", - "provenance": [], - "collapsed_sections": [] - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "language_info": { - "name": "python" - } - }, "cells": [ { "cell_type": "code", @@ -41,25 +25,28 @@ "!pip install text-unidecode\n", "\n", "### Install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]" ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "J6d04-VRjC-O" + }, + "outputs": [], "source": [ "### Install Hugging Face Hub\n", "!python -m pip install huggingface_hub\n", "!python -m pip install evaluate" - ], - "metadata": { - "id": "J6d04-VRjC-O" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", + "metadata": { + "id": "aS-Y5O_oGBTc" + }, "source": [ "# NeMo models on Hugging Face Hub\n", "\n", @@ -68,103 +55,103 @@ "This enables community members to share their NeMo models (any model!) with all users of NeMo!\n", "\n", "**Note**: While in this tutorial we showcase an ASR model, there is no particular restriction to any domain - all NeMo models (.nemo files) of every domain can be uploaded and shared in the same way." - ], - "metadata": { - "id": "aS-Y5O_oGBTc" - } + ] }, { "cell_type": "markdown", + "metadata": { + "id": "Us3UlvwCiEZi" + }, "source": [ "# Login to Hugging Face\n", "\n", "Use the notebook login, and access your user access token (or create one to upload models to Hugging Face).\n", "\n", "For more information, visit the User Access Token section - https://huggingface.co/docs/hub/security-tokens" - ], - "metadata": { - "id": "Us3UlvwCiEZi" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "4RTYbCLziEnb" + }, + "outputs": [], "source": [ "from huggingface_hub import notebook_login\n", "\n", "notebook_login()" - ], - "metadata": { - "id": "4RTYbCLziEnb" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", - "source": [ - "!git config --global credential.helper store" - ], + "execution_count": null, "metadata": { "id": "dgZbTPcFiaml" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "!git config --global credential.helper store" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "s-FiNn1eiFAl" + }, "source": [ "# Prepare a model to upload to HF\n", "\n", "In this example, we will download a NeMo ASR model from NGC and then upload it to Hugging Face for simplicity and to showcase the method.\n", "\n", "**You can swap out this ASR model for any model that you restore via `restore_from()` and follow the same steps to upload your own models !**" - ], - "metadata": { - "id": "s-FiNn1eiFAl" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "5KnVl-M0ax14" + }, + "outputs": [], "source": [ "import torch\n", "import torch.nn as nn\n", "\n", "from omegaconf import DictConfig, OmegaConf, open_dict" - ], - "metadata": { - "id": "5KnVl-M0ax14" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, + 
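When this flow runs as a plain script or in CI rather than a notebook, the interactive `notebook_login()` can be swapped for a token-based call. A hedged sketch — `login` is assumed to be available in the installed `huggingface_hub` version, and `HF_TOKEN` is a hypothetical variable name:

```python
import os

from huggingface_hub import login

# Non-interactive equivalent of notebook_login(): pass a User Access Token.
# Keep the token out of source control - read it from the environment.
login(token=os.environ["HF_TOKEN"])
```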
"metadata": { + "id": "ZEDpkIinbwmm" + }, + "outputs": [], "source": [ "import nemo.collections.asr as nemo_asr # use any domain's models !\n", "import nemo.collections.nlp as nemo_nlp # use any domain's models !\n", "import nemo.collections.tts as nemo_tts # use any domain's models !" - ], - "metadata": { - "id": "ZEDpkIinbwmm" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", + "metadata": { + "id": "mLuQo1vnHVcP" + }, "source": [ "# Model Name\n", "\n", "NeMo adheres to strict requirements when naming a model for upload to NGC / Hugging Face Hub. \n", "\n", "It is **mandatory** to share the model name across the model card, the NeMo file itself. Otherwise NeMo model from Hugging Face will fail to restore correctly." - ], - "metadata": { - "id": "mLuQo1vnHVcP" - } + ] }, { "cell_type": "markdown", + "metadata": { + "id": "MRO2f9fhHywJ" + }, "source": [ "## Naming Convention\n", "\n", @@ -192,130 +179,132 @@ "As an example of the following model we will try today : \n", "\n", "`{task name}_{language id}_{model identifier}_[OPTIONAL modifiers]` = `stt_en_conformer_ctc_small`" - ], - "metadata": { - "id": "MRO2f9fhHywJ" - } + ] }, { "cell_type": "markdown", - "source": [ - "**Set the MODEL_NAME carefully** !" - ], "metadata": { "id": "BjLstKWnPzWV" - } + }, + "source": [ + "**Set the MODEL_NAME carefully** !" + ] }, { "cell_type": "code", - "source": [ - "MODEL_NAME = \"stt_en_conformer_ctc_small\"" - ], + "execution_count": null, "metadata": { "id": "UzHjXDbckU0M" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "MODEL_NAME = \"stt_en_conformer_ctc_small\"" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "qibj1RwvKjSQ" + }, "source": [ "-----\n", "**Restore a NeMo Model**\n", "\n", "Here, we restore a model from NGC directly, but you can restore a model from your training runs using `restore_from()` or use a local .nemo file." - ], - "metadata": { - "id": "qibj1RwvKjSQ" - } + ] }, { "cell_type": "code", - "source": [ - "model = nemo_asr.models.ASRModel.from_pretrained(MODEL_NAME)" - ], + "execution_count": null, "metadata": { "id": "MsC3pE65d_z2" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "model = nemo_asr.models.ASRModel.from_pretrained(MODEL_NAME)" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "y1AkXPFVKfC2" + }, "source": [ "# Create a Hugging Face Model\n", "\n", "Now that we have a NeMo model and have logged into Hugging Face with our user API key, we can begin by creating a new repository and uploading our model." - ], - "metadata": { - "id": "y1AkXPFVKfC2" - } + ] }, { "cell_type": "markdown", + "metadata": { + "id": "iv17qFG7KzlL" + }, "source": [ "-----\n", "\n", "After the model has been restored, create an HfApi object to interact with the model repository." - ], - "metadata": { - "id": "iv17qFG7KzlL" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "aJUXCOTjKy-2" + }, + "outputs": [], "source": [ "from huggingface_hub import HfApi\n", "api = HfApi()\n", "username = api.whoami()['name']" - ], - "metadata": { - "id": "aJUXCOTjKy-2" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "DKRlMeaEkeAH" + }, + "outputs": [], "source": [ "try:\n", " api.create_repo(repo_id=MODEL_NAME)\n", " print(\"Successfully created repository !\")\n", "except Exception as e:\n", " print(\"Repository is possibly already created. 
Refer to error here - \\n\\n\", e)" - ], - "metadata": { - "id": "DKRlMeaEkeAH" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", - "source": [ - "from huggingface_hub import Repository" - ], + "execution_count": null, "metadata": { "id": "N2-deSyTlCdS" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "from huggingface_hub import Repository" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "aTa4RqDYLGMI" + }, "source": [ "Note two essential names - \n", "\n", "- `hf_model_name`: A string name that is the composite of your `username` and `MODEL_NAME` as set above. This name is used for multiple purposes, so keep track of it.\n", "\n", "- `model_filename`: The actual filename of the NeMo model that will be uploaded to Hugging Face. Note that this filename is explicitly set to `{MODEL_NAME}.nemo`. If this model filename is altered, then the model cannot correctly be restored by NeMo when downloaded from Hugging Face Hub, so please be careful." - ], - "metadata": { - "id": "aTa4RqDYLGMI" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "xhTTMNpBskMS" + }, + "outputs": [], "source": [ "local_dir = f'model-{MODEL_NAME}/'\n", "hf_model_name = f'{username}/{MODEL_NAME}'\n", @@ -325,62 +314,60 @@ "\n", "with Repository(local_dir=local_dir, clone_from=hf_model_name, repo_type='model').commit(commit_message):\n", " model.save_to(model_filename)" - ], - "metadata": { - "id": "xhTTMNpBskMS" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", - "source": [ - "print(\"Finished uploading model to :\", hf_model_name)" - ], + "execution_count": null, "metadata": { "id": "BhvNp8MYvxLi" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "print(\"Finished uploading model to :\", hf_model_name)" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "Qrs-MlW9vVbH" + }, "source": [ "## Test if the model works \n", "\n", "Now that we uploaded the model, let's try to use it in NeMo !\n", "\n", "The only change required between normally calling `from_pretrained(model_name)` is to call **`from_pretrained({username}/{filename})`**" - ], - "metadata": { - "id": "Qrs-MlW9vVbH" - } + ] }, { "cell_type": "code", - "source": [ - "hf_model_name = f'{username}/{MODEL_NAME}'\n", - "hf_model = nemo_asr.models.ASRModel.from_pretrained(hf_model_name)" - ], + "execution_count": null, "metadata": { "id": "NyuyyRv5snkr" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "hf_model_name = f'{username}/{MODEL_NAME}'\n", + "hf_model = nemo_asr.models.ASRModel.from_pretrained(hf_model_name)" + ] }, { "cell_type": "code", - "source": [ - "print(\"Successfully used HF model -\", hf_model_name)" - ], + "execution_count": null, "metadata": { "id": "Yhi922WVv4G_" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "print(\"Successfully used HF model -\", hf_model_name)" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "9gG1ElJywEJT" + }, "source": [ "# Model Card\n", "\n", @@ -389,38 +376,40 @@ "The next step is to update the model card to have some helpful information regarding the uploaded model and its scores compared to other models.\n", "\n", "You can do this in two ways, manually (by clicking the link below) or programmatically fill in part of the model card by following the instructions below." 
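As an alternative to the `Repository`-based commit shown above, newer `huggingface_hub` releases can push the checkpoint in a single call with no local clone; a hedged sketch reusing the notebook's `model_filename` and `hf_model_name` (confirm `HfApi.upload_file` exists in your installed version):

```python
from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj=model_filename,  # the .nemo archive written by model.save_to()
    path_in_repo=model_filename,     # keep the exact filename so NeMo can restore it
    repo_id=hf_model_name,           # f'{username}/{MODEL_NAME}'
    repo_type='model',
)
```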
- ], - "metadata": { - "id": "9gG1ElJywEJT" - } + ] }, { "cell_type": "code", - "source": [ - "hf_url = f'https://huggingface.co/{username}/{MODEL_NAME}'\n", - "print(f\"Visit {hf_url} to manually edit your model card\")" - ], + "execution_count": null, "metadata": { "id": "aZJRKoxhwBLr" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "hf_url = f'https://huggingface.co/{username}/{MODEL_NAME}'\n", + "print(f\"Visit {hf_url} to manually edit your model card\")" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "ZlA4hNq6w4rH" + }, "source": [ "-----\n", "\n", "Here, we are going to setup some variables for our model card.\n", "\n", "First up are the tags:" - ], - "metadata": { - "id": "ZlA4hNq6w4rH" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "QxKtPynWyUWX" + }, + "outputs": [], "source": [ "TAGS = [\n", " \"automatic-speech-recognition\", # Task id, refer to https://github.com/huggingface/datasets/blob/master/src/datasets/utils/resources/tasks.json for allowed values.\n", @@ -433,15 +422,13 @@ " \"pytorch\", # required, for toolkit identification\n", " # \"hf-asr-leaderboard\", # Should only be used if model is evaluated on benchmark scores for ASR.\n", "]" - ], - "metadata": { - "id": "QxKtPynWyUWX" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", + "metadata": { + "id": "Fh7rYWEMM0Vz" + }, "source": [ "-----\n", "\n", @@ -450,13 +437,15 @@ "By convention, try to search if the dataset already exists on Hugging Face Datasets - it is usually listed at the top and in lower case.\n", "\n", "If you train on datasets that don't yet exist in Hugging Face Datasets, you can still add them but try to differentiate them by using capitalized names." - ], - "metadata": { - "id": "Fh7rYWEMM0Vz" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "qy-5aDAgzuGD" + }, + "outputs": [], "source": [ "# Replace all spaces with `-`\n", "DATASETS = [\n", @@ -473,26 +462,26 @@ " \"Europarl-ASR-(EN)\",\n", " \"Multilingual-LibriSpeech-(2000-hours)\",\n", "]" - ], - "metadata": { - "id": "qy-5aDAgzuGD" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", + "metadata": { + "id": "_0w1X_z4NN5-" + }, "source": [ "-----\n", "\n", "Now we create an automated template based on a config for the top portion of the readme file." 
- ], - "metadata": { - "id": "_0w1X_z4NN5-" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "O88WFyPJwjJD" + }, + "outputs": [], "source": [ "from dataclasses import dataclass, field\n", "from typing import List, Optional, Dict, Any\n", @@ -507,15 +496,15 @@ " thumbnail: Optional[str] = None\n", " tags: List[str] = field(default_factory=lambda: TAGS)\n", " model_index: Any = field(default_factory=lambda: [dict(name=MODEL_NAME, results=[])])" - ], - "metadata": { - "id": "O88WFyPJwjJD" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "BpInrBdNxxZ3" + }, + "outputs": [], "source": [ "config = NeMoHuggingFaceModelConfig(language=['en'], license=\"cc-by-4.0\") # choose appropriate license here\n", "config = OmegaConf.structured(config)\n", @@ -530,28 +519,28 @@ " config['datasets'] = OmegaConf.create(normalized_datasets)\n", "\n", "print(OmegaConf.to_yaml(config))" - ], - "metadata": { - "id": "BpInrBdNxxZ3" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", + "metadata": { + "id": "0TECX8QrC6FY" + }, "source": [ "## Markdown Template\n", "\n", "Now that we have an auto-generated header for our readme, next, we write down some template markdown for the actual contents of the markdown.\n", "\n", "You can edit the code here directly if you want, or if you prefer the GUI to see the actual changes in real-time, you can finish uploading this model card and then edit the readme file on the Hugging Face webpage itself." - ], - "metadata": { - "id": "0TECX8QrC6FY" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "SSmm7_OiC9Ex" + }, + "outputs": [], "source": [ "hf_model_name = f'{username}/{MODEL_NAME}'\n", "\n", @@ -637,28 +626,28 @@ "[1] [NVIDIA NeMo Toolkit](https://github.com/NVIDIA/NeMo)\n", "\n", "\"\"\"" - ], - "metadata": { - "id": "SSmm7_OiC9Ex" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", + "metadata": { + "id": "KPa53S_5NzNp" + }, "source": [ "-----\n", "\n", "Below, we will upload this model card in a temporary file called **`\"readme_template.md\"`**. This is done to prevent overwriting of the \"final\" model card that the user may have manually edited.\n", "\n", "Once this step is finished, **please copy the contents of this file, create a README.md file and paste the contents into it**." - ], - "metadata": { - "id": "KPa53S_5NzNp" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "0vk5KK4gzpSU" + }, + "outputs": [], "source": [ "local_dir = f'model-{MODEL_NAME}/'\n", "hf_model_name = f'{username}/{MODEL_NAME}'\n", @@ -673,75 +662,75 @@ " f.write(\"\\n---\\n\\n\")\n", " f.write(TEMPLATE)\n", " " - ], - "metadata": { - "id": "0vk5KK4gzpSU" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", + "metadata": { + "id": "dfXoihCQmWDa" + }, "source": [ "-----\n", "\n", "Please visit the URL below to copy the contents of the `readme_template.md` file into your `README.md` file." 
- ], - "metadata": { - "id": "dfXoihCQmWDa" - } + ] }, { "cell_type": "code", - "source": [ - "hf_url = f'https://huggingface.co/{username}/{MODEL_NAME}'\n", - "print(f\"Visit {hf_url} to edit your model card from the generated template file `{filename}`\")" - ], + "execution_count": null, "metadata": { "id": "but-5LuLTHFd" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "hf_url = f'https://huggingface.co/{username}/{MODEL_NAME}'\n", + "print(f\"Visit {hf_url} to edit your model card from the generated template file `{filename}`\")" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "5vPEnlE62dGU" + }, "source": [ "## Evaluation Results\n", "\n", "Now that we have both the model checkpoint and the readme uploaded to the Hub, we can optionally add some evaluation results to the card as well!\n", "\n", "While this next section is optional, it is highly encouraged to do!" - ], - "metadata": { - "id": "5vPEnlE62dGU" - } + ] }, { "cell_type": "code", - "source": [ - "import evaluate\n", - "# evaluate.list_evaluation_modules(module_type='metric', with_details=True)" - ], + "execution_count": null, "metadata": { "id": "rkXMtapA0YzH" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "import evaluate\n", + "# evaluate.list_evaluation_modules(module_type='metric', with_details=True)" + ] }, { "cell_type": "code", - "source": [ - "# Uncomment in order to see what values you can supply to the `evaluate` library to push to the Hub.\n", - "# help(evaluate.push_to_hub)" - ], + "execution_count": null, "metadata": { "id": "50rzG9Qb3yLR" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "# Uncomment in order to see what values you can supply to the `evaluate` library to push to the Hub.\n", + "# help(evaluate.push_to_hub)" + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "5A4g3SGf4d0V" + }, + "outputs": [], "source": [ "hf_model_name = f'{username}/{MODEL_NAME}'\n", "metric_value = 8.1 # value obtained from https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_en_conformer_ctc_small \n", @@ -759,25 +748,36 @@ " # the actual score obtained by the model\n", " metric_value=metric_value,\n", ")" - ], - "metadata": { - "id": "5A4g3SGf4d0V" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", + "metadata": { + "id": "f3YYa7liO_m3" + }, "source": [ "-----\n", "\n", "Done! Now we have a model checkpoint, a model card as well as evaluation results all set up for the NeMo model on Hugging Face!\n", "\n", "To add more metrics, you can copy-paste the above cell and repeat the procedure for as many metrics as needed!" 
- ], - "metadata": { - "id": "f3YYa7liO_m3" - } + ] } - ] -} \ No newline at end of file + ], + "metadata": { + "colab": { + "collapsed_sections": [], + "name": "Publish_NeMo_Model_On_Hugging_Face_Hub.ipynb", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + }, + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/tutorials/VoiceSwapSample.ipynb b/tutorials/VoiceSwapSample.ipynb index 7c895e4e6681..9981deabe2ec 100644 --- a/tutorials/VoiceSwapSample.ipynb +++ b/tutorials/VoiceSwapSample.ipynb @@ -39,7 +39,7 @@ }, "outputs": [], "source": [ - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "# install Pynini for text normalization\n", @@ -329,4 +329,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/tutorials/asr/ASR_CTC_Language_Finetuning.ipynb b/tutorials/asr/ASR_CTC_Language_Finetuning.ipynb index 27b229af8a4c..e128c578e4f1 100644 --- a/tutorials/asr/ASR_CTC_Language_Finetuning.ipynb +++ b/tutorials/asr/ASR_CTC_Language_Finetuning.ipynb @@ -1,25 +1,12 @@ { - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "name": "ASR_CTC_Language_Finetuning.ipynb", - "provenance": [], - "collapsed_sections": [], - "toc_visible": true - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "accelerator": "GPU" - }, "cells": [ { "cell_type": "code", + "execution_count": null, "metadata": { "id": "EGV_ioUHqhun" }, + "outputs": [], "source": [ "\"\"\"\n", "You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.\n", @@ -39,7 +26,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "\"\"\"\n", @@ -48,9 +35,7 @@ "that you want to use the \"Run All Cells\" (or similar) option.\n", "\"\"\"\n", "# exit()" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -75,9 +60,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "1cjMaek4rY8-" }, + "outputs": [], "source": [ "import os\n", "import glob\n", @@ -86,15 +73,15 @@ "import wget\n", "import copy\n", "from omegaconf import OmegaConf, open_dict" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "8wqTRjpNruZD" }, + "outputs": [], "source": [ "data_dir = 'datasets/'\n", "\n", @@ -103,23 +90,21 @@ "\n", "if not os.path.exists(\"scripts\"):\n", " os.makedirs(\"scripts\")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "TSTb6b5DriWG" }, + "outputs": [], "source": [ "import nemo\n", "import nemo.collections.asr as nemo_asr\n", "from nemo.collections.asr.metrics.wer import word_error_rate\n", "from nemo.utils import logging, exp_manager" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -138,52 +123,52 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "27h1i8qa7WFE" }, + "outputs": [], "source": [ "if not os.path.exists(\"scripts/get_commonvoice_data.py\"):\n", " !wget -P scripts/ https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/scripts/dataset_processing/get_commonvoice_data.py" - ], - "execution_count": null, - 
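For orientation before running the download script: it emits NeMo-style manifests in which every line is a self-contained JSON record. A minimal sketch of the expected schema (the path, duration, and text below are illustrative only):

```python
import json

# 'audio_filepath', 'duration' (in seconds) and 'text' are the keys NeMo's ASR
# dataloaders expect; manifests hold one such JSON object per line.
entry = {
    "audio_filepath": "datasets/ja/clips/sample_0001.wav",
    "duration": 3.42,
    "text": "これはテストです",
}
with open("example_manifest.json", "w", encoding="utf-8") as f:
    f.write(json.dumps(entry, ensure_ascii=False) + "\n")
```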
"outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "x0i8hvt688hc" }, + "outputs": [], "source": [ "VERSION = \"cv-corpus-6.1-2020-12-11\"\n", "LANGUAGE = \"ja\"" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "-wI16qY_misb" }, + "outputs": [], "source": [ "tokenizer_dir = os.path.join('tokenizers', LANGUAGE)\n", "manifest_dir = os.path.join('manifests', LANGUAGE)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "bvOT_La2NNw1" }, + "outputs": [], "source": [ "# If something goes wrong during data processing, un-comment the following line to delete the cached dataset \n", "# !rm -rf datasets/$LANGUAGE\n", "!mkdir -p datasets" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -196,9 +181,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "Inwx4OE97guu" }, + "outputs": [], "source": [ "!python scripts/get_commonvoice_data.py \\\n", " --data_root \"datasets/$LANGUAGE/\" \\\n", @@ -208,9 +195,7 @@ " --version=$VERSION \\\n", " --language=$LANGUAGE \\\n", " --files_to_process 'train.tsv' 'dev.tsv' 'test.tsv'" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -223,16 +208,16 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "j7WAGLX59C26" }, + "outputs": [], "source": [ "train_manifest = f\"{manifest_dir}/commonvoice_train_manifest.json\"\n", "dev_manifest = f\"{manifest_dir}/commonvoice_dev_manifest.json\"\n", "test_manifest = f\"{manifest_dir}/commonvoice_test_manifest.json\"" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -262,9 +247,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "EdkJYxUirp7C" }, + "outputs": [], "source": [ "# Manifest Utils\n", "from tqdm.auto import tqdm\n", @@ -292,22 +279,20 @@ " f.write(f\"{datum}\\n\")\n", " print(f\"Finished writing manifest: {filepath}\")\n", " return filepath" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "HngfzcwOijy4" }, + "outputs": [], "source": [ "train_manifest_data = read_manifest(train_manifest)\n", "dev_manifest_data = read_manifest(dev_manifest)\n", "test_manifest_data = read_manifest(test_manifest)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -320,16 +305,16 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "T2iwnvhXimfG" }, + "outputs": [], "source": [ "train_text = [data['text'] for data in train_manifest_data]\n", "dev_text = [data['text'] for data in dev_manifest_data]\n", "test_text = [data['text'] for data in test_manifest_data]" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -344,9 +329,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "XpUb_pI5imhh" }, + "outputs": [], "source": [ "from collections import defaultdict\n", "\n", @@ -357,22 +344,20 @@ " for character in text:\n", " charset[character] += 1\n", " return charset" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "obcPlrOJimju" }, + "outputs": [], "source": [ "train_charset = get_charset(train_manifest_data)\n", "dev_charset = get_charset(dev_manifest_data)\n", "test_charset = 
get_charset(test_manifest_data)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -385,27 +370,27 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "Z8QVdph6imlz" }, + "outputs": [], "source": [ "train_dev_set = set.union(set(train_charset.keys()), set(dev_charset.keys()))\n", "test_set = set(test_charset.keys())" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "NgCfETWNimn3" }, + "outputs": [], "source": [ "print(f\"Number of tokens in train+dev set : {len(train_dev_set)}\")\n", "print(f\"Number of tokens in test set : {len(test_set)}\")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -446,9 +431,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "KPrBi35Cimqc" }, + "outputs": [], "source": [ "# OOV tokens in test set\n", "train_test_common = set.intersection(train_dev_set, test_set)\n", @@ -456,9 +443,7 @@ "print(f\"Number of OOV tokens in test set : {len(test_oov)}\")\n", "print()\n", "print(test_oov)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -486,9 +471,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "VDDiXCiPimr_" }, + "outputs": [], "source": [ "# Populate dictionary mapping count: list[tokens]\n", "train_counts = defaultdict(list)\n", @@ -499,9 +486,7 @@ "\n", "# Compute sorter order of the count keys\n", "count_keys = sorted(list(train_counts.keys()))" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -514,9 +499,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "TJeVEKvAimwE" }, + "outputs": [], "source": [ "MAX_COUNT = 32\n", "\n", @@ -528,9 +515,7 @@ "\n", " TOKEN_COUNT_X.append(count)\n", " NUM_TOKENS_Y.append(num_tokens)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -543,9 +528,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "rKULANgINqbq" }, + "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", "\n", @@ -554,9 +541,7 @@ "plt.xlabel(\"# of occurances\")\n", "plt.ylabel(\"# of tokens\")\n", "plt.xlim(0, MAX_COUNT);" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -569,9 +554,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "9G6laS0ojV-B" }, + "outputs": [], "source": [ "UNCOMMON_TOKENS_COUNT = 5\n", "\n", @@ -582,9 +569,7 @@ " chars_with_infrequent_occurance.update(set(token_list))\n", "\n", "print(f\"Number of tokens with <= {UNCOMMON_TOKENS_COUNT} occurances : {len(chars_with_infrequent_occurance)}\")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -599,9 +584,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "jnh_pnL2jWAY" }, + "outputs": [], "source": [ "all_tokens = set.union(train_dev_set, test_set)\n", "print(f\"Original train+dev+test vocab size : {len(all_tokens)}\")\n", @@ -609,9 +596,7 @@ "extra_kanji = set(test_oov)\n", "train_token_set = all_tokens - extra_kanji\n", "print(f\"New train vocab size : {len(train_token_set)}\")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -639,38 +624,40 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { - "id": "kaX9WzK15Q6t", - "cellView": "form" + "cellView": "form", + "id": "kaX9WzK15Q6t" }, + "outputs": [], "source": [ 
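The occurrence analysis above boils down to two nested counts, which `collections.Counter` expresses compactly; a stand-alone sketch with toy strings standing in for the manifest text:

```python
from collections import Counter

texts = ["これはテスト", "これもテスト"]

# Count how often each character occurs across the corpus ...
char_counts = Counter(ch for text in texts for ch in text)

# ... then bucket characters by their occurrence count, mirroring the plot above.
tokens_per_count = Counter(char_counts.values())
for count in sorted(tokens_per_count):
    print(f"{tokens_per_count[count]} token(s) occur exactly {count} time(s)")
```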
"#@title Dakuten normalization\n", "perform_dakuten_normalization = True #@param [\"True\", \"False\"] {type:\"raw\"}\n", "PERFORM_DAKUTEN_NORMALIZATION = bool(perform_dakuten_normalization)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "HiEZVEshOp-y" }, + "outputs": [], "source": [ "import unicodedata\n", "def process_dakuten(text):\n", " normalized_text = unicodedata.normalize('NFD', text)\n", " normalized_text = normalized_text.replace(\"\\u3099\", \"\").replace(\"\\u309A\", \"\")\n", " return normalized_text" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "pV4kOgpvjWGg" }, + "outputs": [], "source": [ "if PERFORM_DAKUTEN_NORMALIZATION:\n", " normalized_train_token_set = set()\n", @@ -682,9 +669,7 @@ "else:\n", " normalized_train_token_set = train_token_set\n", " " - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -701,9 +686,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "NN3asqvsrp_S" }, + "outputs": [], "source": [ "# Preprocessing steps\n", "import re\n", @@ -727,9 +714,7 @@ " text = data['text']\n", " data['text'] = process_dakuten(text)\n", " return data" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -744,9 +729,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "mwNtHeHLjqJl" }, + "outputs": [], "source": [ "# Processing pipeline\n", "def apply_preprocessors(manifest, preprocessors):\n", @@ -756,15 +743,15 @@ "\n", " print(\"Finished processing manifest !\")\n", " return manifest" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "xB06YHmDr-Ja" }, + "outputs": [], "source": [ "# List of pre-processing functions\n", "PREPROCESSORS = [\n", @@ -772,15 +759,15 @@ " remove_extra_kanji,\n", " remove_dakuten,\n", "]" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "4lqUvpkrr7bQ" }, + "outputs": [], "source": [ "# Load manifests\n", "train_data = read_manifest(train_manifest)\n", @@ -796,9 +783,7 @@ "train_manifest_cleaned = write_processed_manifest(train_data_processed, train_manifest)\n", "dev_manifest_cleaned = write_processed_manifest(dev_data_processed, dev_manifest)\n", "test_manifest_cleaned = write_processed_manifest(test_data_processed, test_manifest)\n" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -813,9 +798,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "WpHk6HW6O0FW" }, + "outputs": [], "source": [ "train_manifest_data = read_manifest(train_manifest_cleaned)\n", "train_charset = get_charset(train_manifest_data)\n", @@ -824,20 +811,18 @@ "dev_charset = get_charset(dev_manifest_data)\n", "\n", "train_dev_set = set.union(set(train_charset.keys()), set(dev_charset.keys()))" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "R3xkR4_dPd3C" }, + "outputs": [], "source": [ "print(f\"Number of tokens in preprocessed train+dev set : {len(train_dev_set)}\")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -854,14 +839,14 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "DlJmwh-iei77" }, + "outputs": [], "source": [ "char_model = 
nemo_asr.models.ASRModel.from_pretrained(\"stt_en_quartznet15x5\", map_location='cpu')" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -876,14 +861,14 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "1VU-jfYLei9-" }, + "outputs": [], "source": [ "char_model.change_vocabulary(new_vocabulary=list(train_dev_set))" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -906,16 +891,16 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "6PPDTaLyejAR" }, + "outputs": [], "source": [ "#@title Freeze Encoder { display-mode: \"form\" }\n", "freeze_encoder = True #@param [\"False\", \"True\"] {type:\"raw\"}\n", "freeze_encoder = bool(freeze_encoder)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -938,9 +923,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "1qiTTgDGejC9" }, + "outputs": [], "source": [ "import torch\n", "import torch.nn as nn\n", @@ -955,15 +942,15 @@ " m.train()\n", " for param in m.parameters():\n", " param.requires_grad_(True)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "9I5dx_GWejFm" }, + "outputs": [], "source": [ "if freeze_encoder:\n", " char_model.encoder.freeze()\n", @@ -972,9 +959,7 @@ "else:\n", " char_model.encoder.unfreeze()\n", " logging.info(\"Model encoder has been un-frozen\")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1004,14 +989,14 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "TBIy8p0fV7sa" }, + "outputs": [], "source": [ "char_model.cfg.labels = list(train_dev_set)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1024,14 +1009,14 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "pzpByrdfejIA" }, + "outputs": [], "source": [ "cfg = copy.deepcopy(char_model.cfg)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1048,9 +1033,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "KlQ5iGrZejKy" }, + "outputs": [], "source": [ "# Setup train, validation, test configs\n", "with open_dict(cfg): \n", @@ -1071,22 +1058,20 @@ " cfg.validation_ds.num_workers = 8\n", " cfg.validation_ds.pin_memory = True\n", " cfg.validation_ds.trim_silence = True" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "tx9DixV0ejMo" }, + "outputs": [], "source": [ "# setup data loaders with new configs\n", "char_model.setup_training_data(cfg.train_ds)\n", "char_model.setup_multiple_validation_data(cfg.validation_ds)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1104,21 +1089,23 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "MgoD5hOKYSKJ" }, + "outputs": [], "source": [ "# Original optimizer + scheduler\n", "print(OmegaConf.to_yaml(char_model.cfg.optim))" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "okytaslHejOm" }, + "outputs": [], "source": [ "with open_dict(char_model.cfg.optim):\n", " char_model.cfg.optim.lr = 0.01\n", @@ -1127,9 +1114,7 @@ " char_model.cfg.optim.sched.warmup_steps = None # Remove default number of steps of warmup\n", " char_model.cfg.optim.sched.warmup_ratio = 0.05 # 5 % warmup\n", " 
char_model.cfg.optim.sched.min_lr = 1e-5" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1151,20 +1136,22 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "aJ6Md-dLejRA" }, + "outputs": [], "source": [ "print(OmegaConf.to_yaml(char_model.cfg.spec_augment))" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "3ei9WsLzejTI" }, + "outputs": [], "source": [ "# with open_dict(char_model.cfg.spec_augment):\n", "# char_model.cfg.spec_augment.freq_masks = 2\n", @@ -1173,9 +1160,7 @@ "# char_model.cfg.spec_augment.time_width = 0.05\n", "\n", "char_model.spec_augmentation = char_model.from_config_dict(char_model.cfg.spec_augment)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1194,30 +1179,30 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { - "id": "cN1FC0o2ejVg", - "cellView": "form" + "cellView": "form", + "id": "cN1FC0o2ejVg" }, + "outputs": [], "source": [ "#@title Metric\n", "use_cer = True #@param [\"False\", \"True\"] {type:\"raw\"}\n", "log_prediction = True #@param [\"False\", \"True\"] {type:\"raw\"}\n", "\n" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "HURZMpPwejXa" }, + "outputs": [], "source": [ "char_model._wer.use_cer = use_cer\n", "char_model._wer.log_prediction = log_prediction" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1234,9 +1219,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "eaw1qsQIf1Zv" }, + "outputs": [], "source": [ "import torch\n", "import pytorch_lightning as ptl\n", @@ -1262,15 +1249,15 @@ "\n", "# Finally, update the model's internal config\n", "char_model.cfg = char_model._cfg" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "ENSpJJqcf1cG" }, + "outputs": [], "source": [ "# Environment variable generally used for multi-node multi-gpu training.\n", "# In notebook environments, this flag is unnecessary and can cause logs of multiple training runs to overwrite each other.\n", @@ -1290,15 +1277,15 @@ "config = OmegaConf.structured(config)\n", "\n", "logdir = exp_manager.exp_manager(trainer, config)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "ATI2R0D7rylR" }, + "outputs": [], "source": [ "try:\n", " from google import colab\n", @@ -1312,21 +1299,19 @@ " %tensorboard --logdir /content/experiments/lang-$LANGUAGE/ASR-Char-Model-Language-$LANGUAGE/\n", "else:\n", " print(\"To use tensorboard, please use this notebook in a Google Colab environment.\")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "TvaESyJHf1eb" }, + "outputs": [], "source": [ "%%time\n", "trainer.fit(char_model)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1376,27 +1361,27 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "yIUQklly9BPa" }, + "outputs": [], "source": [ "if not os.path.exists(\"scripts/process_asr_text_tokenizer.py\"):\n", " !wget -P scripts/ https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/scripts/tokenizers/process_asr_text_tokenizer.py" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": 
null, "metadata": { "id": "SKA9rrpbm3nu" }, + "outputs": [], "source": [ "#@title Tokenizer Config { display-mode: \"form\" }\n", "TOKENIZER_TYPE = \"bpe\" #@param [\"bpe\", \"unigram\"]" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1411,15 +1396,15 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "lO_uskUEm2ZG" }, + "outputs": [], "source": [ "# << VOCAB SIZE can be changed to any value larger than (len(train_dev_set) + 2)! >>\n", "VOCAB_SIZE = len(train_dev_set) + 2" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1436,9 +1421,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "yT-SBPN2Ox6Y" }, + "outputs": [], "source": [ "!python scripts/process_asr_text_tokenizer.py \\\n", " --manifest=$train_manifest_cleaned,$dev_manifest_cleaned \\\n", @@ -1449,21 +1436,19 @@ " --spe_character_coverage=1.0 \\\n", " --no_lower_case \\\n", " --log" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "G5TxLHtKPW4E" }, + "outputs": [], "source": [ "TOKENIZER_DIR = f\"{tokenizer_dir}/tokenizer_spe_{TOKENIZER_TYPE}_v{VOCAB_SIZE}/\"\n", "print(\"Tokenizer directory :\", TOKENIZER_DIR)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1478,9 +1463,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "8sAz2_RyMu7J" }, + "outputs": [], "source": [ "# Number of tokens in tokenizer - \n", "with open(os.path.join(TOKENIZER_DIR, 'tokenizer.vocab')) as f:\n", @@ -1488,15 +1475,15 @@ "\n", "num_tokens = len(tokens)\n", "print(\"Number of tokens : \", num_tokens)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "zktPYPCxNXNO" }, + "outputs": [], "source": [ "if num_tokens < VOCAB_SIZE:\n", " print(\n", @@ -1504,9 +1491,7 @@ " f\"with vocab size = {VOCAB_SIZE}. Current number of tokens = {num_tokens}. 
\"\n", " f\"Please reconstruct the tokenizer with fewer tokens\"\n", " )" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1521,14 +1506,14 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "mmSj18iQQTZx" }, + "outputs": [], "source": [ "model = nemo_asr.models.ASRModel.from_pretrained(\"stt_en_citrinet_512\", map_location='cpu')" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1548,15 +1533,15 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "FmFQKwGkoaIx" }, + "outputs": [], "source": [ "# Preserve the decoder parameters in case weight matching can be done later\n", "pretrained_decoder = model.decoder.state_dict()" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1571,14 +1556,14 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "-8SKfYSVorgg" }, + "outputs": [], "source": [ "model.change_vocabulary(new_tokenizer_dir=TOKENIZER_DIR, new_tokenizer_type=\"bpe\")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1595,9 +1580,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "367FBtRDorkT" }, + "outputs": [], "source": [ "# Insert preserved model weights if shapes match\n", "if model.decoder.decoder_layers[0].weight.shape == pretrained_decoder['decoder_layers.0.weight'].shape:\n", @@ -1605,9 +1592,7 @@ " logging.info(\"Decoder shapes matched - restored weights from pre-trained model\")\n", "else:\n", " logging.info(\"\\nDecoder shapes did not match - could not restore decoder weights from pre-trained model.\")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1622,22 +1607,24 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "lfDW0gQVpm4d" }, + "outputs": [], "source": [ "#@title Freeze Encoder { display-mode: \"form\" }\n", "freeze_encoder = True #@param [\"False\", \"True\"] {type:\"raw\"}\n", "freeze_encoder = bool(freeze_encoder)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "oLkm96zkplrX" }, + "outputs": [], "source": [ "if freeze_encoder:\n", " model.encoder.freeze()\n", @@ -1646,9 +1633,7 @@ "else:\n", " model.encoder.unfreeze()\n", " logging.info(\"Model encoder has been un-frozen\")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1665,14 +1650,14 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "pBYAd_2-R2r3" }, + "outputs": [], "source": [ "cfg = copy.deepcopy(model.cfg)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1687,9 +1672,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "NfbtgTC-RyzF" }, + "outputs": [], "source": [ "# Setup new tokenizer\n", "cfg.tokenizer.dir = TOKENIZER_DIR\n", @@ -1697,9 +1684,7 @@ "\n", "# Set tokenizer config\n", "model.cfg.tokenizer = cfg.tokenizer" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1716,21 +1701,23 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "wnw-ygClmg7t" }, + "outputs": [], "source": [ "# Setup train/val/test configs\n", "print(OmegaConf.to_yaml(cfg.train_ds))" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "OlOowK7rRAvs" }, + "outputs": [], "source": [ "# Setup 
train, validation, test configs\n", "with open_dict(cfg):\n", @@ -1757,23 +1744,21 @@ " cfg.test_ds.pin_memory = True\n", " cfg.test_ds.use_start_end_token = True\n", " cfg.test_ds.trim_silence = True" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "y98ZAhBtRtoD" }, + "outputs": [], "source": [ "# setup model with new configs\n", "model.setup_training_data(cfg.train_ds)\n", "model.setup_multiple_validation_data(cfg.validation_ds)\n", "model.setup_multiple_test_data(cfg.test_ds)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1797,9 +1782,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "ozJDj6BktKw-" }, + "outputs": [], "source": [ "def analyse_ctc_failures_in_model(model):\n", " count_ctc_failures = 0\n", @@ -1836,52 +1823,52 @@ " model = model.train()\n", " \n", " return count_ctc_failures, am_seq_lengths, target_seq_lengths" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "hJGUcq2BtKzw" }, + "outputs": [], "source": [ "results = analyse_ctc_failures_in_model(model)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "crEWxvI2tK2S" }, + "outputs": [], "source": [ "num_ctc_failures, am_seq_lengths, target_seq_lengths = results" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "L8M0-mSI1Jp5" }, + "outputs": [], "source": [ "if num_ctc_failures > 0:\n", " logging.warning(f\"\\nCTC loss will fail for {num_ctc_failures} samples ({num_ctc_failures * 100./ float(len(am_seq_lengths))} % of samples)!\\n\"\n", " f\"Increase the vocabulary size of the tokenizer so that this number becomes close to zero !\")\n", "else:\n", " logging.info(\"No CTC failure cases !\")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "00wKre0W1Jsx" }, + "outputs": [], "source": [ "# Compute average ratio of T / U\n", "avg_T = sum(am_seq_lengths) / float(len(am_seq_lengths))\n", @@ -1896,9 +1883,7 @@ "print(f\"Average Target sequence length = {avg_U}\")\n", "print()\n", "print(f\"Ratio of Average AM sequence length to target sequence length = {avg_length_ratio}\")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1913,14 +1898,14 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "sS-xoplxSTJv" }, + "outputs": [], "source": [ "print(OmegaConf.to_yaml(cfg.optim))" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1935,9 +1920,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "Io55nnbdXoeG" }, + "outputs": [], "source": [ "with open_dict(model.cfg.optim):\n", " model.cfg.optim.lr = 0.025\n", @@ -1945,9 +1932,7 @@ " model.cfg.optim.sched.warmup_steps = None # Remove default number of steps of warmup\n", " model.cfg.optim.sched.warmup_ratio = 0.10 # 10 % warmup\n", " model.cfg.optim.sched.min_lr = 1e-9" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1962,9 +1947,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "6Vb35_oRh_sV" }, + "outputs": [], "source": [ "with open_dict(model.cfg.spec_augment):\n", " model.cfg.spec_augment.freq_masks = 2\n", @@ -1973,9 +1960,7 @@ " 
model.cfg.spec_augment.time_width = 0.05\n", "\n", "model.spec_augmentation = model.from_config_dict(model.cfg.spec_augment)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1990,30 +1975,30 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "cellView": "form", "id": "UfUlPXZS6vlV" }, + "outputs": [], "source": [ "#@title Metric\n", "use_cer = True #@param [\"False\", \"True\"] {type:\"raw\"}\n", "log_prediction = True #@param [\"False\", \"True\"] {type:\"raw\"}\n", "\n" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "6qpbMNZh68p9" }, + "outputs": [], "source": [ "model._wer.use_cer = use_cer\n", "model._wer.log_prediction = log_prediction" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -2030,9 +2015,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "bonpx5sRS07M" }, + "outputs": [], "source": [ "import torch\n", "import pytorch_lightning as ptl\n", @@ -2058,15 +2045,15 @@ "\n", "# finally, update the model's internal config\n", "model.cfg = model._cfg" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "SR4CiViFS8Ww" }, + "outputs": [], "source": [ "from nemo.utils import exp_manager\n", "\n", @@ -2088,15 +2075,15 @@ "config = OmegaConf.structured(config)\n", "\n", "logdir = exp_manager.exp_manager(trainer, config)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "OlvyYwYWTsl6" }, + "outputs": [], "source": [ "try:\n", " from google import colab\n", @@ -2110,21 +2097,19 @@ " %tensorboard --logdir /content/experiments/lang-$LANGUAGE/ASR-Model-Language-$LANGUAGE/\n", "else:\n", " print(\"To use tensorboard, please use this notebook in a Google Colab environment.\")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "6X21Q2qfVLvG" }, + "outputs": [], "source": [ "%%time\n", "trainer.fit(model)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -2139,16 +2124,16 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "DoWNVNYGOaMX" }, + "outputs": [], "source": [ "save_path = f\"Model-{LANGUAGE}.nemo\"\n", "model.save_to(f\"{save_path}\")\n", "print(f\"Model saved at path : {os.getcwd() + os.path.sep + save_path}\")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -2163,5 +2148,20 @@ "While the focus was on a small dataset for Japanese, nearly all of this information can be used for larger datasets and other scenarios where compute is limited, or the model's size prevents fine-tuning the entire model." 
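As a quick sanity check on the artifact above, the `.nemo` file can be restored in a fresh process and used for inference directly. A minimal sketch, assuming the `Model-{LANGUAGE}.nemo` checkpoint saved in the previous cell; the audio path is a placeholder:

```python
import nemo.collections.asr as nemo_asr

# Restore the fine-tuned checkpoint written by model.save_to() above.
restored = nemo_asr.models.ASRModel.restore_from(f"Model-{LANGUAGE}.nemo")
restored.eval()

# 'some_eval_clip.wav' is illustrative - point this at a file from the test manifest.
print(restored.transcribe(paths2audio_files=["some_eval_clip.wav"], batch_size=1))
```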
] } - ] -} \ No newline at end of file + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "name": "ASR_CTC_Language_Finetuning.ipynb", + "provenance": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/tutorials/asr/ASR_for_telephony_speech.ipynb b/tutorials/asr/ASR_for_telephony_speech.ipynb index 787b448620f7..79ee2d03226f 100644 --- a/tutorials/asr/ASR_for_telephony_speech.ipynb +++ b/tutorials/asr/ASR_for_telephony_speech.ipynb @@ -27,7 +27,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "## Grab the config we'll use in this example\n", @@ -340,4 +340,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/tutorials/asr/ASR_with_NeMo.ipynb b/tutorials/asr/ASR_with_NeMo.ipynb index 9b86fab7e900..4d5ad11b0cdd 100644 --- a/tutorials/asr/ASR_with_NeMo.ipynb +++ b/tutorials/asr/ASR_with_NeMo.ipynb @@ -1,38 +1,12 @@ { - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "accelerator": "GPU", - "colab": { - "name": "ASR_with_NeMo.ipynb", - "provenance": [], - "collapsed_sections": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.7" - } - }, "cells": [ { "cell_type": "code", + "execution_count": null, "metadata": { "id": "lJz6FDU1lRzc" }, + "outputs": [], "source": [ "\"\"\"\n", "You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.\n", @@ -53,7 +27,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "\"\"\"\n", @@ -62,9 +36,7 @@ "that you want to use the \"Run All Cells\" (or similar) option.\n", "\"\"\"\n", "# exit()" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -157,9 +129,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "gAhsmi6HlRzh" }, + "outputs": [], "source": [ "import os\n", "# This is where the an4/ directory will be placed.\n", @@ -168,16 +142,16 @@ "\n", "if not os.path.exists(data_dir):\n", " os.makedirs(data_dir)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "Yb4fuUvWlRzk", "scrolled": true }, + "outputs": [], "source": [ "import glob\n", "import os\n", @@ -207,9 +181,7 @@ " cmd = [\"sox\", sph_path, wav_path]\n", " subprocess.run(cmd)\n", "print(\"Finished conversion.\\n******\")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -224,9 +196,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "_M_bSs3MjQlz" }, + "outputs": [], "source": [ "import librosa\n", "import IPython.display as ipd\n", @@ -236,9 +210,7 @@ "audio, sample_rate = librosa.load(example_file)\n", "\n", "ipd.Audio(example_file, rate=sample_rate)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": 
"markdown", @@ -253,9 +225,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "MqIAKkqelRzm" }, + "outputs": [], "source": [ "%matplotlib inline\n", "import librosa.display\n", @@ -267,9 +241,7 @@ "plt.ylabel('Amplitude')\n", "\n", "_ = librosa.display.waveshow(audio)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -291,9 +263,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "oCFneEs1lRzp" }, + "outputs": [], "source": [ "import numpy as np\n", "\n", @@ -305,9 +279,7 @@ "librosa.display.specshow(spec_db, y_axis='log', x_axis='time')\n", "plt.colorbar()\n", "plt.title('Audio Spectrogram');" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -324,9 +296,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "7yQXVn-TlRzt" }, + "outputs": [], "source": [ "# Plot the mel spectrogram of our sample\n", "mel_spec = librosa.feature.melspectrogram(audio, sr=sample_rate)\n", @@ -336,9 +310,7 @@ " mel_spec_db, x_axis='time', y_axis='mel')\n", "plt.colorbar()\n", "plt.title('Mel Spectrogram');" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -383,18 +355,18 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "4_W0lhaQlRzx" }, + "outputs": [], "source": [ "# NeMo's \"core\" package\n", "import nemo\n", "# NeMo's ASR collection - this collections contains complete ASR models and\n", "# building blocks (modules) for ASR\n", "import nemo.collections.asr as nemo_asr" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -409,15 +381,15 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "KFZZpYult96G" }, + "outputs": [], "source": [ "# This line will download pre-trained QuartzNet15x5 model from NVIDIA's NGC cloud and instantiate it for you\n", "quartznet = nemo_asr.models.EncDecCTCModel.from_pretrained(model_name=\"QuartzNet15x5Base-En\")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -430,16 +402,16 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "3QCpR_93u1hp" }, + "outputs": [], "source": [ "files = [os.path.join(data_dir, 'an4/wav/an4_clstk/mgah/cen2-mgah-b.wav')]\n", "for fname, transcription in zip(files, quartznet.transcribe(paths2audio_files=files)):\n", " print(f\"Audio in {fname} was recognized as: {transcription}\")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -487,9 +459,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "lVB1sG1GlRzz" }, + "outputs": [], "source": [ "# --- Building Manifest Files --- #\n", "import json\n", @@ -536,9 +510,7 @@ " build_manifest(test_transcripts, test_manifest, 'an4/wav/an4test_clstk')\n", " print(\"Test manifest created.\")\n", "print(\"***Done***\")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -574,9 +546,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "PXVKBniMlRz5" }, + "outputs": [], "source": [ "# --- Config Information ---#\n", "try:\n", @@ -587,7 +561,7 @@ "\n", "if not os.path.exists(config_path):\n", " # Grab the config we'll use in this example\n", - " BRANCH = 'r1.13.0'\n", + " BRANCH = 'main'\n", " !mkdir configs\n", " !wget -P configs/ https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/examples/asr/conf/config.yaml\n", "\n", @@ -595,9 +569,7 @@ "with open(config_path) as 
f:\n", " params = yaml.load(f)\n", "print(params)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -614,15 +586,15 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "GUfR6tAK0k2u" }, + "outputs": [], "source": [ "import pytorch_lightning as pl\n", "trainer = pl.Trainer(devices=1, accelerator='gpu', max_epochs=50)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -636,17 +608,17 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "Cbf0fsMK09lk" }, + "outputs": [], "source": [ "from omegaconf import DictConfig\n", "params['model']['train_ds']['manifest_filepath'] = train_manifest\n", "params['model']['validation_ds']['manifest_filepath'] = test_manifest\n", "first_asr_model = nemo_asr.models.EncDecCTCModel(cfg=DictConfig(params['model']), trainer=trainer)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -659,15 +631,15 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "inRJsnrz1psq" }, + "outputs": [], "source": [ "# Start training!!!\n", "trainer.fit(first_asr_model)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -685,9 +657,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "n_0y3stSXDX_" }, + "outputs": [], "source": [ "try:\n", " from google import colab\n", @@ -701,9 +675,7 @@ " %tensorboard --logdir lightning_logs/\n", "else:\n", " print(\"To use tensorboard, please use this notebook in a Google Colab environment.\")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -716,14 +688,14 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "7kdQbpohXnEd" }, + "outputs": [], "source": [ "print(params['model']['optim'])" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -736,18 +708,18 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "AbigFKUtYgvn" }, + "outputs": [], "source": [ "import copy\n", "new_opt = copy.deepcopy(params['model']['optim'])\n", "new_opt['lr'] = 0.001\n", "first_asr_model.setup_optimization(optim_config=DictConfig(new_opt))\n", "# And then you can invoke trainer.fit(first_asr_model)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -764,9 +736,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "3FT0klSV268p" }, + "outputs": [], "source": [ "paths2audio_files = [os.path.join(data_dir, 'an4/wav/an4_clstk/mgah/cen2-mgah-b.wav'),\n", " os.path.join(data_dir, 'an4/wav/an4_clstk/fmjd/cen7-fmjd-b.wav'),\n", @@ -774,9 +748,7 @@ " os.path.join(data_dir, 'an4/wav/an4_clstk/fkai/cen8-fkai-b.wav')]\n", "print(first_asr_model.transcribe(paths2audio_files=paths2audio_files,\n", " batch_size=4))" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -789,9 +761,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "7mP4r1Gx_Ilt" }, + "outputs": [], "source": [ "# Bigger batch-size = bigger throughput\n", "params['model']['validation_ds']['batch_size'] = 16\n", @@ -830,9 +804,7 @@ "\n", "# We need to sum all numerators and denominators first. 
Then divide.\n", "print(f\"WER = {sum(wer_nums)/sum(wer_denoms)}\")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -864,14 +836,14 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "9glGogaPlR0H" }, + "outputs": [], "source": [ "print(quartznet._cfg['spec_augment'])" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -899,9 +871,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "hl320dsydWX0" }, + "outputs": [], "source": [ "# Check what kind of vocabulary/alphabet the model has right now\n", "print(quartznet.decoder.vocabulary)\n", @@ -914,9 +888,7 @@ " 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', \"'\", \"!\"\n", " ]\n", ")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -929,9 +901,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "_PZJIso-eDl-" }, + "outputs": [], "source": [ "# Use the smaller learning rate we set before\n", "quartznet.setup_optimization(optim_config=DictConfig(new_opt))\n", @@ -945,9 +919,7 @@ "# And now we can create a PyTorch Lightning trainer and call `fit` again.\n", "trainer = pl.Trainer(devices=1, accelerator='gpu', max_epochs=2)\n", "trainer.fit(quartznet)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -992,9 +964,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "I4WRcmakjQnj" }, + "outputs": [], "source": [ "!pip install --upgrade onnxruntime # for gpu, use onnxruntime-gpu\n", "#!mkdir -p ort\n", @@ -1006,9 +980,7 @@ "#!pip uninstall -y onnxruntime-gpu\n", "#!pip install --upgrade --force-reinstall ./build/Linux/Release/dist/onnxruntime*.whl\n", "#%cd .." 
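For reference, the export step that pairs with this runtime is a single call on the model; NeMo models expose an `export()` helper for writing the graph to ONNX. A one-line sketch, assuming the `quartznet` model fine-tuned above (the filename is illustrative and should match whatever the inference session loads):

```python
# Export the fine-tuned QuartzNet graph to ONNX so onnxruntime can load it.
quartznet.export("qn.onnx")
```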
- ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1021,9 +993,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "HZnyWxPyjQnm" }, + "outputs": [], "source": [ "import json\n", "import os\n", @@ -1100,9 +1074,7 @@ " hypotheses, _ = wer.decoding.ctc_decoder_predictions_tensor(greedy_predictions)\n", " print(hypotheses)\n", " break\n" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1164,12 +1136,40 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "V3ERGX86lR0V" }, - "source": [], - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "name": "ASR_with_NeMo.ipynb", + "provenance": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.7" } - ] -} \ No newline at end of file + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/tutorials/asr/ASR_with_Subword_Tokenization.ipynb b/tutorials/asr/ASR_with_Subword_Tokenization.ipynb index 224984b64cca..7402931ece05 100644 --- a/tutorials/asr/ASR_with_Subword_Tokenization.ipynb +++ b/tutorials/asr/ASR_with_Subword_Tokenization.ipynb @@ -1,26 +1,12 @@ { - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "name": "ASR_with_Subword_Tokenization.ipynb", - "provenance": [], - "collapsed_sections": [], - "toc_visible": true - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3", - "language": "python" - }, - "accelerator": "GPU" - }, "cells": [ { "cell_type": "code", + "execution_count": null, "metadata": { "id": "HqBQwLAsme9b" }, + "outputs": [], "source": [ "\"\"\"\n", "You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.\n", @@ -40,7 +26,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "## Grab the config we'll use in this example\n", @@ -53,9 +39,7 @@ "that you want to use the \"Run All Cells\" (or similar) option.\n", "\"\"\"\n", "# exit()" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -63,10 +47,10 @@ "id": "jW8pMLX4EKb0" }, "source": [ - "# Automatic Speech Recognition with Subword Tokenization\r\n", - "\r\n", - "In the [ASR with NeMo notebook](https://colab.research.google.com/github/NVIDIA/NeMo/blob/stable/tutorials/asr/ASR_with_NeMo.ipynb), we discuss the pipeline necessary for Automatic Speech Recognition (ASR), and then use the NeMo toolkit to construct a functioning speech recognition model.\r\n", - "\r\n", + "# Automatic Speech Recognition with Subword Tokenization\n", + "\n", + "In the [ASR with NeMo notebook](https://colab.research.google.com/github/NVIDIA/NeMo/blob/stable/tutorials/asr/ASR_with_NeMo.ipynb), we discuss the pipeline necessary for Automatic Speech Recognition (ASR), and then use the NeMo toolkit to construct a functioning speech recognition model.\n", + "\n", "In this notebook, we take a step further and look into subword tokenization as a useful encoding scheme for ASR 
models, and why they are necessary. We then construct a custom tokenizer from the dataset, and use it to construct and train an ASR model on the [AN4 dataset from CMU](http://www.speech.cs.cmu.edu/databases/an4/) (with processing using `sox`)." ] }, @@ -76,16 +60,16 @@ "id": "w2pDg6jJLLVM" }, "source": [ - "## Subword Tokenization\r\n", - "\r\n", - "We begin with a short intro to what exactly is subword tokenization. If you are familiar with some Natural Language Processing terminologies, then you might have heard of the term \"subword\" frequently.\r\n", - "\r\n", - "So what is a subword in the first place? Simply put, it is either a single character or a group of characters. When combined according to a tokenization-detokenization algorithm, it generates a set of characters, words, or entire sentences. \r\n", - "\r\n", - "Many subword tokenization-detokenization algorithms exist, which can be built using large corpora of text data to tokenize and detokenize the data to and from subwords effectively. Some of the most commonly used subword tokenization methods are [Byte Pair Encoding](https://arxiv.org/abs/1508.07909), [Word Piece Encoding](https://static.googleusercontent.com/media/research.google.com/ja//pubs/archive/37842.pdf) and [Sentence Piece Encoding](https://www.aclweb.org/anthology/D18-2012/), to name just a few.\r\n", - "\r\n", - "------\r\n", - "\r\n", + "## Subword Tokenization\n", + "\n", + "We begin with a short intro to what exactly is subword tokenization. If you are familiar with some Natural Language Processing terminologies, then you might have heard of the term \"subword\" frequently.\n", + "\n", + "So what is a subword in the first place? Simply put, it is either a single character or a group of characters. When combined according to a tokenization-detokenization algorithm, it generates a set of characters, words, or entire sentences. \n", + "\n", + "Many subword tokenization-detokenization algorithms exist, which can be built using large corpora of text data to tokenize and detokenize the data to and from subwords effectively. Some of the most commonly used subword tokenization methods are [Byte Pair Encoding](https://arxiv.org/abs/1508.07909), [Word Piece Encoding](https://static.googleusercontent.com/media/research.google.com/ja//pubs/archive/37842.pdf) and [Sentence Piece Encoding](https://www.aclweb.org/anthology/D18-2012/), to name just a few.\n", + "\n", + "------\n", + "\n", "Here, we will show a short demo on why subword tokenization is necessary for Automatic Speech Recognition under certain situations and its benefits to the model in terms of efficiency and accuracy." 
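Before the hands-on demo, the detokenization half of such a scheme can be made concrete with a toy `##`-marker convention (the same convention used in the `hel##` / `##lo` example later in this section). The function and tokens below are invented purely for illustration:

```python
def merge_subwords(tokens):
    # Toy detokenizer: a '##' marker glues a piece onto its neighbour.
    text = " ".join(tokens)
    for marker in ("## ##", "## ", " ##"):
        text = text.replace(marker, "")
    return text

print(merge_subwords(["hel##", "##lo", "world"]))  # -> "hello world"
```

Real algorithms differ in where they place the markers and how they learn the merge table, but the round-trip contract - tokenize, then detokenize, recovers the original text - is the same one exercised below.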
] }, @@ -100,17 +84,17 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "M_MQ7NLlBbup" }, + "outputs": [], "source": [ - "TEXT_CORPUS = [\r\n", - " \"hello world\",\r\n", - " \"today is a good day\",\r\n", + "TEXT_CORPUS = [\n", + " \"hello world\",\n", + " \"today is a good day\",\n", "]" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -123,23 +107,23 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "3tusMof9QMs7" }, + "outputs": [], "source": [ - "def char_tokenize(text):\r\n", - " tokens = []\r\n", - " for char in text:\r\n", - " tokens.append(ord(char))\r\n", - " return tokens\r\n", - "\r\n", - "def char_detokenize(tokens):\r\n", - " tokens = [chr(t) for t in tokens]\r\n", - " text = \"\".join(tokens)\r\n", + "def char_tokenize(text):\n", + " tokens = []\n", + " for char in text:\n", + " tokens.append(ord(char))\n", + " return tokens\n", + "\n", + "def char_detokenize(tokens):\n", + " tokens = [chr(t) for t in tokens]\n", + " text = \"\".join(tokens)\n", " return text" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -152,17 +136,17 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "2stpuRsNQpMJ" }, + "outputs": [], "source": [ - "char_tokens = char_tokenize(TEXT_CORPUS[0])\r\n", - "print(\"Tokenized tokens :\", char_tokens)\r\n", - "text = char_detokenize(char_tokens)\r\n", + "char_tokens = char_tokenize(TEXT_CORPUS[0])\n", + "print(\"Tokenized tokens :\", char_tokens)\n", + "text = char_detokenize(char_tokens)\n", "print(\"Detokenized text :\", text)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -170,48 +154,48 @@ "id": "gY6G6Ow1RSf4" }, "source": [ - "-----\r\n", - "Great! The character tokenizer did its job correctly - each character is separated as an individual token, and they can be reconstructed into precisely the original text!\r\n", - "\r\n", + "-----\n", + "Great! The character tokenizer did its job correctly - each character is separated as an individual token, and they can be reconstructed into precisely the original text!\n", + "\n", "Now let's create a simple dictionary-based tokenizer - it will have a select set of subwords that it will use to map tokens back and forth. Note - to simplify the technique's demonstration; we will use a vocabulary with entire words. However, note that this is an uncommon occurrence unless the vocabulary sizes are huge when built on natural text." 
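The dictionary tokenizer implemented in the next cell simply falls back to single characters whenever a whole word is missing from the vocabulary. A common refinement, not used in this notebook but worth seeing once, is greedy longest-match segmentation in the WordPiece style; a self-contained sketch with an invented vocabulary (the notebook's simpler implementation follows right after):

```python
def greedy_tokenize(word, vocab):
    # Repeatedly take the longest vocabulary entry that prefixes the remainder.
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start and word[start:end] not in vocab:
            end -= 1
        if end == start:
            return None  # nothing matches: an out-of-vocabulary symbol
        tokens.append(word[start:end])
        start = end
    return tokens

toy_vocab = {"hello", "hel", "lo", "h", "e", "l", "o"}
print(greedy_tokenize("hello", toy_vocab))  # ['hello'] - the whole word wins
print(greedy_tokenize("helo", toy_vocab))   # ['hel', 'o']
```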
] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "Mhn2MxODRNTv" }, + "outputs": [], "source": [ - "def dict_tokenize(text, vocabulary):\r\n", - " tokens = []\r\n", - "\r\n", - " # first do full word searches\r\n", - " split_text = text.split()\r\n", - " for split in split_text:\r\n", - " if split in vocabulary:\r\n", - " tokens.append(vocabulary[split])\r\n", - " else:\r\n", - " chars = list(split)\r\n", - " t_chars = [vocabulary[c] for c in chars]\r\n", - " tokens.extend(t_chars)\r\n", - " tokens.append(vocabulary[\" \"])\r\n", - "\r\n", - " # remove extra space token\r\n", - " tokens.pop(-1)\r\n", - " return tokens\r\n", - "\r\n", - "def dict_detokenize(tokens, vocabulary):\r\n", - " text = \"\"\r\n", - " reverse_vocab = {v: k for k, v in vocabulary.items()}\r\n", - " for token in tokens:\r\n", - " if token in reverse_vocab:\r\n", - " text = text + reverse_vocab[token]\r\n", - " else:\r\n", - " text = text + \"\".join(token)\r\n", + "def dict_tokenize(text, vocabulary):\n", + " tokens = []\n", + "\n", + " # first do full word searches\n", + " split_text = text.split()\n", + " for split in split_text:\n", + " if split in vocabulary:\n", + " tokens.append(vocabulary[split])\n", + " else:\n", + " chars = list(split)\n", + " t_chars = [vocabulary[c] for c in chars]\n", + " tokens.extend(t_chars)\n", + " tokens.append(vocabulary[\" \"])\n", + "\n", + " # remove extra space token\n", + " tokens.pop(-1)\n", + " return tokens\n", + "\n", + "def dict_detokenize(tokens, vocabulary):\n", + " text = \"\"\n", + " reverse_vocab = {v: k for k, v in vocabulary.items()}\n", + " for token in tokens:\n", + " if token in reverse_vocab:\n", + " text = text + reverse_vocab[token]\n", + " else:\n", + " text = text + \"\".join(token)\n", " return text" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -224,34 +208,34 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "rone69s8Ui3q" }, + "outputs": [], "source": [ - "vocabulary = {chr(i + ord(\"a\")) : (i + 1) for i in range(26)}\r\n", - "# add whole words and special tokens\r\n", - "vocabulary[\" \"] = 0\r\n", - "vocabulary[\"hello\"] = len(vocabulary) + 1\r\n", - "vocabulary[\"today\"] = len(vocabulary) + 1\r\n", - "vocabulary[\"good\"] = len(vocabulary) + 1\r\n", + "vocabulary = {chr(i + ord(\"a\")) : (i + 1) for i in range(26)}\n", + "# add whole words and special tokens\n", + "vocabulary[\" \"] = 0\n", + "vocabulary[\"hello\"] = len(vocabulary) + 1\n", + "vocabulary[\"today\"] = len(vocabulary) + 1\n", + "vocabulary[\"good\"] = len(vocabulary) + 1\n", "print(vocabulary)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "sGLGaLtXUgrN" }, + "outputs": [], "source": [ - "dict_tokens = dict_tokenize(TEXT_CORPUS[0], vocabulary)\r\n", - "print(\"Tokenized tokens :\", dict_tokens)\r\n", - "text = dict_detokenize(dict_tokens, vocabulary)\r\n", + "dict_tokens = dict_tokenize(TEXT_CORPUS[0], vocabulary)\n", + "print(\"Tokenized tokens :\", dict_tokens)\n", + "text = dict_detokenize(dict_tokens, vocabulary)\n", "print(\"Detokenized text :\", text)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -259,25 +243,25 @@ "id": "rUETSbM-XYUl" }, "source": [ - "------\r\n", - "Great! 
Our dictionary tokenizer works well and tokenizes-detokenizes the data correctly.\r\n", - "\r\n", - "You might be wondering - why did we have to go through all this trouble to tokenize and detokenize data if we get back the same thing?\r\n", - "\r\n", + "------\n", + "Great! Our dictionary tokenizer works well and tokenizes-detokenizes the data correctly.\n", + "\n", + "You might be wondering - why did we have to go through all this trouble to tokenize and detokenize data if we get back the same thing?\n", + "\n", "For ASR - the hidden benefit lies in the length of the tokenized representation!" ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "eZFGuLqUVhLW" }, + "outputs": [], "source": [ - "print(\"Character tokenization length -\", len(char_tokens))\r\n", + "print(\"Character tokenization length -\", len(char_tokens))\n", "print(\"Dict tokenization length -\", len(dict_tokens))" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -285,8 +269,8 @@ "id": "vw6jJD8eYJpK" }, "source": [ - "By having the whole word \"hello\" in our tokenizer's dictionary, we could reduce the length of the tokenized data by four tokens and still represent the same information!\r\n", - "\r\n", + "By having the whole word \"hello\" in our tokenizer's dictionary, we could reduce the length of the tokenized data by four tokens and still represent the same information!\n", + "\n", "Actual subword algorithms like the ones discussed above go several steps further - they partition whole words based on occurrence in text and build tokens for them too! So instead of wasting 5 tokens for `[\"h\", \"e\", \"l\", \"l\", \"o\"]`, we can represent it as `[\"hel##\", \"##lo\"]` and then merge the `##` tokens together to get back `hello` by using just 2 tokens !" ] }, @@ -296,25 +280,25 @@ "id": "hcCbVA3GY-TZ" }, "source": [ - "## The necessity of subword tokenization\r\n", - "\r\n", - "It has been found via extensive research in the domain of Neural Machine Translation and Language Modelling (and its variants), that subword tokenization not only reduces the length of the tokenized representation (thereby making sentences shorter and more manageable for models to learn), but also boosts the accuracy of prediction of correct tokens (refer to the earlier cited papers).\r\n", - "\r\n", - "You might remember that earlier; we mentioned subword tokenization as a necessity rather than just a nice-to-have component for ASR. In the previous tutorial, we used the [Connectionist Temporal Classification](https://www.cs.toronto.edu/~graves/icml_2006.pdf) loss function to train the model, but this loss function has a few limitations- \r\n", - "\r\n", - " - **Generated tokens are conditionally independent of each other**. In other words - the probability of character \"l\" being predicted after \"hel##\" is conditionally independent of the previous token - so any other token can also be predicted unless the model has future information!\r\n", - " - **The length of the generated (target) sequence must be shorter than that of the source sequence.** \r\n", - "\r\n", - "------\r\n", - "\r\n", - "It turns out - subword tokenization helps alleviate both of these issues!\r\n", - "\r\n", - " - Sophisticated subword tokenization algorithms build their vocabularies based on large text corpora. To accurately tokenize such large volumes of text with minimal vocabulary size, the subwords that are learned inherently model the interdependency between tokens of that language to some degree. 
\r\n", - " \r\n", - "Looking at the previous example, the token `hel##` is a single token that represents the relationship `h` => `e` => `l`. When the model predicts the singe token `hel##`, it implicitly predicts this relationship - even though the subsequent token can be either `l` (for `hell`) or `##lo` (for `hello`) and is predicted independently of the previous token!\r\n", - "\r\n", - " - By reducing the target sentence length by subword tokenization (target sentence here being the characters/subwords transcribed from the audio signal), we entirely sidestep the sequence length limitation of CTC loss!\r\n", - "\r\n", + "## The necessity of subword tokenization\n", + "\n", + "It has been found via extensive research in the domain of Neural Machine Translation and Language Modelling (and its variants), that subword tokenization not only reduces the length of the tokenized representation (thereby making sentences shorter and more manageable for models to learn), but also boosts the accuracy of prediction of correct tokens (refer to the earlier cited papers).\n", + "\n", + "You might remember that earlier; we mentioned subword tokenization as a necessity rather than just a nice-to-have component for ASR. In the previous tutorial, we used the [Connectionist Temporal Classification](https://www.cs.toronto.edu/~graves/icml_2006.pdf) loss function to train the model, but this loss function has a few limitations- \n", + "\n", + " - **Generated tokens are conditionally independent of each other**. In other words - the probability of character \"l\" being predicted after \"hel##\" is conditionally independent of the previous token - so any other token can also be predicted unless the model has future information!\n", + " - **The length of the generated (target) sequence must be shorter than that of the source sequence.** \n", + "\n", + "------\n", + "\n", + "It turns out - subword tokenization helps alleviate both of these issues!\n", + "\n", + " - Sophisticated subword tokenization algorithms build their vocabularies based on large text corpora. To accurately tokenize such large volumes of text with minimal vocabulary size, the subwords that are learned inherently model the interdependency between tokens of that language to some degree. \n", + " \n", + "Looking at the previous example, the token `hel##` is a single token that represents the relationship `h` => `e` => `l`. When the model predicts the singe token `hel##`, it implicitly predicts this relationship - even though the subsequent token can be either `l` (for `hell`) or `##lo` (for `hello`) and is predicted independently of the previous token!\n", + "\n", + " - By reducing the target sentence length by subword tokenization (target sentence here being the characters/subwords transcribed from the audio signal), we entirely sidestep the sequence length limitation of CTC loss!\n", + "\n", "This means we can perform a larger number of pooling steps in our acoustic models, thereby improving execution speed while simultaneously reducing memory requirements." ] }, @@ -324,8 +308,8 @@ "id": "KAFSGJRAeTe6" }, "source": [ - "# Building a custom subword tokenizer\r\n", - "\r\n", + "# Building a custom subword tokenizer\n", + "\n", "After all that talk about subword tokenization, let's finally build a custom tokenizer for our ASR model! While the `AN4` dataset is simple enough to be trained using character-based models, its small size is also perfect for a demonstration on a notebook." 
] }, @@ -335,64 +319,64 @@ "id": "Ire6cSmEe2GU" }, "source": [ - "## Preparing the dataset (AN4)\r\n", - "\r\n", - "The AN4 dataset, also known as the Alphanumeric dataset, was collected and published by Carnegie Mellon University. It consists of recordings of people spelling out addresses, names, telephone numbers, etc., one letter or number at a time, and their corresponding transcripts. We choose to use AN4 for this tutorial because it is relatively small, with 948 training and 130 test utterances, and so it trains quickly.\r\n", - "\r\n", + "## Preparing the dataset (AN4)\n", + "\n", + "The AN4 dataset, also known as the Alphanumeric dataset, was collected and published by Carnegie Mellon University. It consists of recordings of people spelling out addresses, names, telephone numbers, etc., one letter or number at a time, and their corresponding transcripts. We choose to use AN4 for this tutorial because it is relatively small, with 948 training and 130 test utterances, and so it trains quickly.\n", + "\n", "Before we get started, let's download and prepare the dataset. The utterances are available as `.sph` files, so we will need to convert them to `.wav` for later processing. If you are not using Google Colab, please make sure you have [Sox](http://sox.sourceforge.net/) installed for this step--see the \"Downloads\" section of the linked Sox homepage. (If you are using Google Colab, Sox should have already been installed in the setup cell at the beginning.)" ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "dLB_KedzYHCw" }, + "outputs": [], "source": [ "# This is where the an4/ directory will be placed.\n", "# Change this if you don't want the data to be extracted in the current directory.\n", "# The directory should exist.\n", "data_dir = \".\"" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "AsHdRslhe-7W" }, + "outputs": [], "source": [ - "import glob\r\n", - "import os\r\n", - "import subprocess\r\n", - "import tarfile\r\n", - "import wget\r\n", - "\r\n", - "# Download the dataset. This will take a few moments...\r\n", - "print(\"******\")\r\n", - "if not os.path.exists(data_dir + '/an4_sphere.tar.gz'):\r\n", - " an4_url = 'https://dldata-public.s3.us-east-2.amazonaws.com/an4_sphere.tar.gz' # for the original source, please visit http://www.speech.cs.cmu.edu/databases/an4/an4_sphere.tar.gz \r\n", - " an4_path = wget.download(an4_url, data_dir)\r\n", - " print(f\"Dataset downloaded at: {an4_path}\")\r\n", - "else:\r\n", - " print(\"Tarfile already exists.\")\r\n", - " an4_path = data_dir + '/an4_sphere.tar.gz'\r\n", - "\r\n", - "if not os.path.exists(data_dir + '/an4/'):\r\n", - " # Untar and convert .sph to .wav (using sox)\r\n", - " tar = tarfile.open(an4_path)\r\n", - " tar.extractall(path=data_dir)\r\n", - "\r\n", - " print(\"Converting .sph to .wav...\")\r\n", - " sph_list = glob.glob(data_dir + '/an4/**/*.sph', recursive=True)\r\n", - " for sph_path in sph_list:\r\n", - " wav_path = sph_path[:-4] + '.wav'\r\n", - " cmd = [\"sox\", sph_path, wav_path]\r\n", - " subprocess.run(cmd)\r\n", + "import glob\n", + "import os\n", + "import subprocess\n", + "import tarfile\n", + "import wget\n", + "\n", + "# Download the dataset. 
This will take a few moments...\n", + "print(\"******\")\n", + "if not os.path.exists(data_dir + '/an4_sphere.tar.gz'):\n", + " an4_url = 'https://dldata-public.s3.us-east-2.amazonaws.com/an4_sphere.tar.gz' # for the original source, please visit http://www.speech.cs.cmu.edu/databases/an4/an4_sphere.tar.gz \n", + " an4_path = wget.download(an4_url, data_dir)\n", + " print(f\"Dataset downloaded at: {an4_path}\")\n", + "else:\n", + " print(\"Tarfile already exists.\")\n", + " an4_path = data_dir + '/an4_sphere.tar.gz'\n", + "\n", + "if not os.path.exists(data_dir + '/an4/'):\n", + " # Untar and convert .sph to .wav (using sox)\n", + " tar = tarfile.open(an4_path)\n", + " tar.extractall(path=data_dir)\n", + "\n", + " print(\"Converting .sph to .wav...\")\n", + " sph_list = glob.glob(data_dir + '/an4/**/*.sph', recursive=True)\n", + " for sph_path in sph_list:\n", + " wav_path = sph_path[:-4] + '.wav'\n", + " cmd = [\"sox\", sph_path, wav_path]\n", + " subprocess.run(cmd)\n", "print(\"Finished conversion.\\n******\")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -400,7 +384,7 @@ "id": "6kOuy-OWfUWn" }, "source": [ - "You should now have a folder called `an4` that contains `etc/an4_train.transcription`, `etc/an4_test.transcription`, audio files in `wav/an4_clstk` and `wav/an4test_clstk`, along with some other files we will not need.\r\n" + "You should now have a folder called `an4` that contains `etc/an4_train.transcription`, `etc/an4_test.transcription`, audio files in `wav/an4_clstk` and `wav/an4test_clstk`, along with some other files we will not need.\n" ] }, { @@ -409,79 +393,79 @@ "id": "S2S--I3kftF0" }, "source": [ - "## Creating Data Manifests\r\n", - "\r\n", - "The first thing we need to do now is to create manifests for our training and evaluation data, which will contain the metadata of our audio files. NeMo data sets take in a standardized manifest format where each line corresponds to one sample of audio, such that the number of lines in a manifest is equal to the number of samples that are represented by that manifest. A line must contain the path to an audio file, the corresponding transcript (or path to a transcript file), and the duration of the audio sample.\r\n", - "\r\n", - "Here's an example of what one line in a NeMo-compatible manifest might look like:\r\n", - "```\r\n", - "{\"audio_filepath\": \"path/to/audio.wav\", \"duration\": 3.45, \"text\": \"this is a nemo tutorial\"}\r\n", - "```\r\n", - "\r\n", - "We can build our training and evaluation manifests using `an4/etc/an4_train.transcription` and `an4/etc/an4_test.transcription`, which have lines containing transcripts and their corresponding audio file IDs:\r\n", - "```\r\n", - "...\r\n", - " P I T T S B U R G H (cen5-fash-b)\r\n", - " TWO SIX EIGHT FOUR FOUR ONE EIGHT (cen7-fash-b)\r\n", - "...\r\n", + "## Creating Data Manifests\n", + "\n", + "The first thing we need to do now is to create manifests for our training and evaluation data, which will contain the metadata of our audio files. NeMo data sets take in a standardized manifest format where each line corresponds to one sample of audio, such that the number of lines in a manifest is equal to the number of samples that are represented by that manifest. 
A line must contain the path to an audio file, the corresponding transcript (or path to a transcript file), and the duration of the audio sample.\n", + "\n", + "Here's an example of what one line in a NeMo-compatible manifest might look like:\n", + "```\n", + "{\"audio_filepath\": \"path/to/audio.wav\", \"duration\": 3.45, \"text\": \"this is a nemo tutorial\"}\n", + "```\n", + "\n", + "We can build our training and evaluation manifests using `an4/etc/an4_train.transcription` and `an4/etc/an4_test.transcription`, which have lines containing transcripts and their corresponding audio file IDs:\n", + "```\n", + "...\n", + " P I T T S B U R G H (cen5-fash-b)\n", + " TWO SIX EIGHT FOUR FOUR ONE EIGHT (cen7-fash-b)\n", + "...\n", "```" ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "sFyGsk80fRp7" }, + "outputs": [], "source": [ - "# --- Building Manifest Files --- #\r\n", - "import json\r\n", - "import librosa\r\n", - "\r\n", - "# Function to build a manifest\r\n", - "def build_manifest(transcripts_path, manifest_path, wav_path):\r\n", - " with open(transcripts_path, 'r') as fin:\r\n", - " with open(manifest_path, 'w') as fout:\r\n", - " for line in fin:\r\n", - " # Lines look like this:\r\n", - " # transcript (fileID)\r\n", - " transcript = line[: line.find('(')-1].lower()\r\n", - " transcript = transcript.replace('', '').replace('', '')\r\n", - " transcript = transcript.strip()\r\n", - "\r\n", - " file_id = line[line.find('(')+1 : -2] # e.g. \"cen4-fash-b\"\r\n", - " audio_path = os.path.join(\r\n", - " data_dir, wav_path,\r\n", - " file_id[file_id.find('-')+1 : file_id.rfind('-')],\r\n", - " file_id + '.wav')\r\n", - "\r\n", - " duration = librosa.core.get_duration(filename=audio_path)\r\n", - "\r\n", - " # Write the metadata to the manifest\r\n", - " metadata = {\r\n", - " \"audio_filepath\": audio_path,\r\n", - " \"duration\": duration,\r\n", - " \"text\": transcript\r\n", - " }\r\n", - " json.dump(metadata, fout)\r\n", - " fout.write('\\n')\r\n", - " \r\n", - "# Building Manifests\r\n", - "print(\"******\")\r\n", - "train_transcripts = data_dir + '/an4/etc/an4_train.transcription'\r\n", - "train_manifest = data_dir + '/an4/train_manifest.json'\r\n", - "if not os.path.isfile(train_manifest):\r\n", - " build_manifest(train_transcripts, train_manifest, 'an4/wav/an4_clstk')\r\n", - " print(\"Training manifest created.\")\r\n", - "\r\n", - "test_transcripts = data_dir + '/an4/etc/an4_test.transcription'\r\n", - "test_manifest = data_dir + '/an4/test_manifest.json'\r\n", - "if not os.path.isfile(test_manifest):\r\n", - " build_manifest(test_transcripts, test_manifest, 'an4/wav/an4test_clstk')\r\n", - " print(\"Test manifest created.\")\r\n", + "# --- Building Manifest Files --- #\n", + "import json\n", + "import librosa\n", + "\n", + "# Function to build a manifest\n", + "def build_manifest(transcripts_path, manifest_path, wav_path):\n", + " with open(transcripts_path, 'r') as fin:\n", + " with open(manifest_path, 'w') as fout:\n", + " for line in fin:\n", + " # Lines look like this:\n", + " # transcript (fileID)\n", + " transcript = line[: line.find('(')-1].lower()\n", + " transcript = transcript.replace('', '').replace('', '')\n", + " transcript = transcript.strip()\n", + "\n", + " file_id = line[line.find('(')+1 : -2] # e.g. 
\"cen4-fash-b\"\n", + " audio_path = os.path.join(\n", + " data_dir, wav_path,\n", + " file_id[file_id.find('-')+1 : file_id.rfind('-')],\n", + " file_id + '.wav')\n", + "\n", + " duration = librosa.core.get_duration(filename=audio_path)\n", + "\n", + " # Write the metadata to the manifest\n", + " metadata = {\n", + " \"audio_filepath\": audio_path,\n", + " \"duration\": duration,\n", + " \"text\": transcript\n", + " }\n", + " json.dump(metadata, fout)\n", + " fout.write('\\n')\n", + " \n", + "# Building Manifests\n", + "print(\"******\")\n", + "train_transcripts = data_dir + '/an4/etc/an4_train.transcription'\n", + "train_manifest = data_dir + '/an4/train_manifest.json'\n", + "if not os.path.isfile(train_manifest):\n", + " build_manifest(train_transcripts, train_manifest, 'an4/wav/an4_clstk')\n", + " print(\"Training manifest created.\")\n", + "\n", + "test_transcripts = data_dir + '/an4/etc/an4_test.transcription'\n", + "test_manifest = data_dir + '/an4/test_manifest.json'\n", + "if not os.path.isfile(test_manifest):\n", + " build_manifest(test_transcripts, test_manifest, 'an4/wav/an4test_clstk')\n", + " print(\"Test manifest created.\")\n", "print(\"***Done***\")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -494,14 +478,14 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "PSv_wZTQf50U" }, + "outputs": [], "source": [ "!head -n 5 {data_dir}/an4/train_manifest.json" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -509,25 +493,25 @@ "id": "3S80tsTHhDmU" }, "source": [ - "## Build a custom tokenizer\r\n", - "\r\n", - "Next, we will use a NeMo script to easily build a tokenizer for the above dataset. The script takes a few arguments, which will be explained in detail.\r\n", - "\r\n", + "## Build a custom tokenizer\n", + "\n", + "Next, we will use a NeMo script to easily build a tokenizer for the above dataset. The script takes a few arguments, which will be explained in detail.\n", + "\n", "First, download the tokenizer creation script from the nemo repository." ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "ESHI2piTgJRO" }, + "outputs": [], "source": [ "if not os.path.exists(\"scripts/tokenizers/process_asr_text_tokenizer.py\"):\n", " !mkdir scripts\n", " !wget -P scripts/ https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/scripts/tokenizers/process_asr_text_tokenizer.py" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -535,34 +519,36 @@ "id": "BkcpeYp1iIsU" }, "source": [ - "The script above takes a few important arguments -\r\n", - "\r\n", - " - either `--manifest` or `--data_file`: If your text data lies inside of an ASR manifest file, then use the `--manifest` path. If instead the text data is inside a file with separate lines corresponding to different text lines, then use `--data_file`. In either case, you can add commas to concatenate different manifests or different data files.\r\n", - "\r\n", - " - `--data_root`: The output directory (whose subdirectories will be created if not present) where the tokenizers will be placed.\r\n", - "\r\n", - " - `--vocab_size`: The size of the tokenizer vocabulary. Larger vocabularies can accommodate almost entire words, but the decoder size of any model will grow proportionally.\r\n", - "\r\n", - " - `--tokenizer`: Can be either `spe` or `wpe` . `spe` refers to the Google `sentencepiece` library tokenizer. `wpe` refers to the HuggingFace BERT Word Piece tokenizer. 
Please refer to the papers above for the relevant technique in order to select an appropriate tokenizer.\r\n", - "\r\n", - " - `--no_lower_case`: When this flag is passed, it will force the tokenizer to create separate tokens for upper and lower case characters. By default, the script will turn all the text to lower case before tokenization (and if upper case characters are passed during training/inference, the tokenizer will emit a token equivalent to Out-Of-Vocabulary). Used primarily for the English language. \r\n", - "\r\n", - " - `--spe_type`: The `sentencepiece` library has a few implementations of the tokenization technique, and `spe_type` refers to these implementations. Currently supported types are `unigram`, `bpe`, `char`, `word`. Defaults to `bpe`.\r\n", - "\r\n", - " - `--spe_character_coverage`: The `sentencepiece` library considers how much of the original vocabulary it should cover in its \"base set\" of tokens (akin to the lower and upper case characters of the English language). For almost all languages with small base token sets `(<1000 tokens)`, this should be kept at its default of 1.0. For languages with larger vocabularies (say Japanese, Mandarin, Korean etc), the suggested value is 0.9995.\r\n", - "\r\n", - " - `--spe_sample_size`: If the dataset is too large, consider using a sampled dataset indicated by a positive integer. By default, any negative value (default = -1) will use the entire dataset.\r\n", - "\r\n", - " - `--spe_train_extremely_large_corpus`: When training a sentencepiece tokenizer on very large amounts of text, sometimes the tokenizer will run out of memory or wont be able to process so much data on RAM. At some point you might receive the following error - \"Input corpus too large, try with train_extremely_large_corpus=true\". If your machine has large amounts of RAM, it might still be possible to build the tokenizer using the above flag. Will silently fail if it runs out of RAM.\r\n", - "\r\n", + "The script above takes a few important arguments -\n", + "\n", + " - either `--manifest` or `--data_file`: If your text data lies inside of an ASR manifest file, then use the `--manifest` path. If instead the text data is inside a file with separate lines corresponding to different text lines, then use `--data_file`. In either case, you can add commas to concatenate different manifests or different data files.\n", + "\n", + " - `--data_root`: The output directory (whose subdirectories will be created if not present) where the tokenizers will be placed.\n", + "\n", + " - `--vocab_size`: The size of the tokenizer vocabulary. Larger vocabularies can accommodate almost entire words, but the decoder size of any model will grow proportionally.\n", + "\n", + " - `--tokenizer`: Can be either `spe` or `wpe` . `spe` refers to the Google `sentencepiece` library tokenizer. `wpe` refers to the HuggingFace BERT Word Piece tokenizer. Please refer to the papers above for the relevant technique in order to select an appropriate tokenizer.\n", + "\n", + " - `--no_lower_case`: When this flag is passed, it will force the tokenizer to create separate tokens for upper and lower case characters. By default, the script will turn all the text to lower case before tokenization (and if upper case characters are passed during training/inference, the tokenizer will emit a token equivalent to Out-Of-Vocabulary). Used primarily for the English language. 
\n", + "\n", + " - `--spe_type`: The `sentencepiece` library has a few implementations of the tokenization technique, and `spe_type` refers to these implementations. Currently supported types are `unigram`, `bpe`, `char`, `word`. Defaults to `bpe`.\n", + "\n", + " - `--spe_character_coverage`: The `sentencepiece` library considers how much of the original vocabulary it should cover in its \"base set\" of tokens (akin to the lower and upper case characters of the English language). For almost all languages with small base token sets `(<1000 tokens)`, this should be kept at its default of 1.0. For languages with larger vocabularies (say Japanese, Mandarin, Korean etc), the suggested value is 0.9995.\n", + "\n", + " - `--spe_sample_size`: If the dataset is too large, consider using a sampled dataset indicated by a positive integer. By default, any negative value (default = -1) will use the entire dataset.\n", + "\n", + " - `--spe_train_extremely_large_corpus`: When training a sentencepiece tokenizer on very large amounts of text, sometimes the tokenizer will run out of memory or wont be able to process so much data on RAM. At some point you might receive the following error - \"Input corpus too large, try with train_extremely_large_corpus=true\". If your machine has large amounts of RAM, it might still be possible to build the tokenizer using the above flag. Will silently fail if it runs out of RAM.\n", + "\n", " - `--log`: Whether the script should display log messages" ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "mAw4WMqbh6ii" }, + "outputs": [], "source": [ "!python ./scripts/process_asr_text_tokenizer.py \\\n", " --manifest=\"{data_dir}/an4/train_manifest.json\" \\\n", @@ -572,9 +558,7 @@ " --no_lower_case \\\n", " --spe_type=\"unigram\" \\\n", " --log" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -582,23 +566,23 @@ "id": "gaIFIKgol-p2" }, "source": [ - "-----\r\n", - "\r\n", - "That's it! Our tokenizer is now built and stored inside the `data_root` directory that we provided to the script.\r\n", - "\r\n", + "-----\n", + "\n", + "That's it! Our tokenizer is now built and stored inside the `data_root` directory that we provided to the script.\n", + "\n", "First we start by inspecting the tokenizer vocabulary itself. To keep it manageable, we will print just the first 10 tokens of the vocabulary:" ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "0A9fSpr4l58u" }, + "outputs": [], "source": [ "!head -n 10 {data_dir}/tokenizers/an4/tokenizer_spe_unigram_v32/vocab.txt" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -606,31 +590,31 @@ "id": "kPuyTHGTm8Q-" }, "source": [ - "# Training an ASR Model with subword tokenization\r\n", - "\r\n", - "Now that our tokenizer is built, let's begin constructing an ASR model that will use this tokenizer for its dataset pre-processing and post-processing steps.\r\n", - "\r\n", - "We will use a Citrinet model to demonstrate the usage of subword tokenization models for training and inference. 
Citrinet is a [QuartzNet-like architecture](https://arxiv.org/abs/1910.10261), but it uses subword-tokenization along with 8x subsampling and [Squeeze-and-Excitation](https://arxiv.org/abs/1709.01507) to achieve strong accuracy in transcriptions while still using non-autoregressive decoding for efficient inference.\r\n", - "\r\n", - "We'll be using the **Neural Modules (NeMo) toolkit** for this part, so if you haven't already, you should download and install NeMo and its dependencies. To do so, just follow the directions on the [GitHub page](https://github.com/NVIDIA/NeMo), or in the [documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/).\r\n", - "\r\n", + "# Training an ASR Model with subword tokenization\n", + "\n", + "Now that our tokenizer is built, let's begin constructing an ASR model that will use this tokenizer for its dataset pre-processing and post-processing steps.\n", + "\n", + "We will use a Citrinet model to demonstrate the usage of subword tokenization models for training and inference. Citrinet is a [QuartzNet-like architecture](https://arxiv.org/abs/1910.10261), but it uses subword-tokenization along with 8x subsampling and [Squeeze-and-Excitation](https://arxiv.org/abs/1709.01507) to achieve strong accuracy in transcriptions while still using non-autoregressive decoding for efficient inference.\n", + "\n", + "We'll be using the **Neural Modules (NeMo) toolkit** for this part, so if you haven't already, you should download and install NeMo and its dependencies. To do so, just follow the directions on the [GitHub page](https://github.com/NVIDIA/NeMo), or in the [documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/).\n", + "\n", "NeMo lets us easily hook together the components (modules) of our model, such as the data layer, intermediate layers, and various losses, without worrying too much about implementation details of individual parts or connections between modules. NeMo also comes with complete models which only require your data and hyperparameters for training." ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "jALgpGLjmaCw" }, + "outputs": [], "source": [ - "# NeMo's \"core\" package\r\n", - "import nemo\r\n", - "# NeMo's ASR collection - this collections contains complete ASR models and\r\n", - "# building blocks (modules) for ASR\r\n", + "# NeMo's \"core\" package\n", + "import nemo\n", + "# NeMo's ASR collection - this collection contains complete ASR models and\n", + "# building blocks (modules) for ASR\n", "import nemo.collections.asr as nemo_asr" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", "metadata": { "id": "msxCiR8epEZu" }, "source": [ - "## Training from scratch\r\n", - "\r\n", + "## Training from scratch\n", + "\n", "To train from scratch, you need to prepare your training data in the right format and specify your model's architecture." ] }, { "cell_type": "markdown", "metadata": { "id": "PasvgSEwpWXd" }, "source": [ - "### Specifying Our Model with a YAML Config File\r\n", - "\r\n", - "We'll build a *Citrinet* model for this tutorial and use *greedy CTC decoder*, using the configuration found in `./configs/citrinet_bpe.yaml`.\r\n", - "\r\n", - "If we open up this config file, we find model section which describes architecture of our model. A model contains an entry labeled `encoder`, with a field called `jasper` that contains a list with multiple entries. 
Each of the members in this list specifies one block in our model, and looks something like this:\r\n", - "```\r\n", - "- filters: 192\r\n", - " repeat: 5\r\n", - " kernel: [11]\r\n", - " stride: [1]\r\n", - " dilation: [1]\r\n", - " dropout: 0.0\r\n", - " residual: false\r\n", - " separable: true\r\n", - " se: true\r\n", - " se_context_size: -1\r\n", - "```\r\n", - "The first member of the list corresponds to the first block in the QuartzNet/Citrinet architecture diagram. \r\n", - "\r\n", - "Some entries at the top of the file specify how we will handle training (`train_ds`) and validation (`validation_ds`) data.\r\n", - "\r\n", + "### Specifying Our Model with a YAML Config File\n", + "\n", + "We'll build a *Citrinet* model for this tutorial and use a *greedy CTC decoder*, using the configuration found in `./configs/citrinet_bpe.yaml`.\n", + "\n", + "If we open up this config file, we find a `model` section which describes the architecture of our model. A model contains an entry labeled `encoder`, with a field called `jasper` that contains a list with multiple entries. Each of the members in this list specifies one block in our model, and looks something like this:\n", + "```\n", + "- filters: 192\n", + " repeat: 5\n", + " kernel: [11]\n", + " stride: [1]\n", + " dilation: [1]\n", + " dropout: 0.0\n", + " residual: false\n", + " separable: true\n", + " se: true\n", + " se_context_size: -1\n", + "```\n", + "The first member of the list corresponds to the first block in the QuartzNet/Citrinet architecture diagram. \n", + "\n", + "Some entries at the top of the file specify how we will handle training (`train_ds`) and validation (`validation_ds`) data.\n", + "\n", "Using a YAML config such as this helps get a quick and human-readable overview of what your architecture looks like, and allows you to swap out model and run configurations easily without needing to change your code." ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "XLUDyWOmo8xZ" }, + "outputs": [], "source": [ "from omegaconf import OmegaConf, open_dict" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "p1O8JRk1qXX9" }, + "outputs": [], "source": [ "params = OmegaConf.load(\"./configs/config_bpe.yaml\")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", "metadata": { @@ -706,14 +690,14 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "raXzemtIqjL-" }, + "outputs": [], "source": [ "print(OmegaConf.to_yaml(params))" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", "metadata": { "id": "Nw-8epOcuCcG" }, "source": [ - "## Specifying the tokenizer to the model\r\n", - "\r\n", - "Now that we have a model config, we are almost ready to train it ! We just have to inform it where the tokenizer directory exists and it will do the rest for us !\r\n", - "\r\n", - "We have to provide just two pieces of information via the config:\r\n", - "\r\n", - " - `tokenizer.dir`: The directory where the tokenizer files are stored\r\n", - " - `tokenizer.type`: Can be `bpe` (for `sentencepiece` based tokenizers) or `wpe` (for HuggingFace based BERT Word Piece Tokenizers. Represents what type of tokenizer is being supplied and parse its directory to construct the actual tokenizer.\r\n", - "\r\n", - "**Note**: We only have to provide the **directory** where the tokenizer file exists along with its vocabulary and any other essential components. 
We pass the directory instead of an explicit vocabulary path, since not all libraries construct their tokenizer in the same manner, so the model will figure out how it should prepare the tokenizer.\r\n" + "## Specifying the tokenizer to the model\n", + "\n", + "Now that we have a model config, we are almost ready to train it! We just have to inform it where the tokenizer directory exists, and it will do the rest for us!\n", + "\n", + "We have to provide just two pieces of information via the config:\n", + "\n", + " - `tokenizer.dir`: The directory where the tokenizer files are stored\n", + " - `tokenizer.type`: Can be `bpe` (for `sentencepiece`-based tokenizers) or `wpe` (for HuggingFace-based BERT Word Piece tokenizers). This tells the model what type of tokenizer is being supplied, so that it can parse the tokenizer directory and construct the actual tokenizer.\n", + "\n", + "**Note**: We only have to provide the **directory** where the tokenizer file exists along with its vocabulary and any other essential components. We pass the directory instead of an explicit vocabulary path, since not all libraries construct their tokenizer in the same manner, so the model will figure out how it should prepare the tokenizer.\n" ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "YME-v0rcudUz" }, + "outputs": [], "source": [ - "params.model.tokenizer.dir = data_dir + \"/tokenizers/an4/tokenizer_spe_unigram_v32/\" # note this is a directory, not a path to a vocabulary file\r\n", + "params.model.tokenizer.dir = data_dir + \"/tokenizers/an4/tokenizer_spe_unigram_v32/\" # note this is a directory, not a path to a vocabulary file\n", "params.model.tokenizer.type = \"bpe\"" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", "metadata": { "id": "ceelkfIHrHTR" }, "source": [ - "### Training with PyTorch Lightning\r\n", - "\r\n", - "NeMo models and modules can be used in any PyTorch code where torch.nn.Module is expected.\r\n", - "\r\n", + "### Training with PyTorch Lightning\n", + "\n", + "NeMo models and modules can be used in any PyTorch code where torch.nn.Module is expected.\n", + "\n", "However, NeMo's models are based on [PytorchLightning's](https://github.com/PyTorchLightning/pytorch-lightning) LightningModule and we recommend you use PytorchLightning for training and fine-tuning, as it makes using mixed precision and distributed training very easy. So to start, let's create a Trainer instance for training on a GPU for 50 epochs." ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "3rslHEKeq9qy" }, + "outputs": [], "source": [ - "import pytorch_lightning as pl\r\n", + "import pytorch_lightning as pl\n", "trainer = pl.Trainer(devices=1, accelerator='gpu', max_epochs=50)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", "metadata": { "id": "pLbXg1swre_M" }, "source": [ - "Next, we instantiate and ASR model based on our ``citrinet_bpe.yaml`` file from the previous section.\r\n", + "Next, we instantiate an ASR model based on our ``citrinet_bpe.yaml`` file from the previous section.\n", "Note that this is a stage during which we also tell the model where our training and validation manifests are."
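One aside before we set those paths: `open_dict` was imported above alongside `OmegaConf`, but is never otherwise used in this tutorial. A minimal hedged sketch of what it is for, under the assumption (not something this tutorial relies on) that a config has been put into "struct" mode, which rejects writes to keys that do not already exist:

```python
# A hedged aside: OmegaConf configs can be locked into "struct" mode, which
# rejects assignments to keys that are not already defined. `open_dict`
# temporarily lifts that restriction. The `experiment_tag` key below is a
# hypothetical example, not part of this tutorial's config.
from omegaconf import OmegaConf, open_dict

cfg = OmegaConf.create({"model": {"tokenizer": {"dir": None, "type": "bpe"}}})
OmegaConf.set_struct(cfg, True)

with open_dict(cfg.model):
    cfg.model.experiment_tag = "an4_citrinet_demo"  # allowed only inside open_dict
```

The `params` object loaded from YAML above is not locked this way, so the plain assignments used in the next cell should work as-is.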
] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "v7RnwRpprb2S" }, + "outputs": [], "source": [ - "# Update paths to dataset\r\n", - "params.model.train_ds.manifest_filepath = train_manifest\r\n", - "params.model.validation_ds.manifest_filepath = test_manifest\r\n", - "\r\n", - "# remove spec augment for this dataset\r\n", + "# Update paths to dataset\n", + "params.model.train_ds.manifest_filepath = train_manifest\n", + "params.model.validation_ds.manifest_filepath = test_manifest\n", + "\n", + "# remove spec augment for this dataset\n", "params.model.spec_augment.rect_masks = 0" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", "metadata": { @@ -802,21 +786,21 @@ "id": "2qLDHHOOx8T1" }, "source": [ - "Note the subtle difference in the model that we instantiate - `EncDecCTCModelBPE` instead of `EncDecCTCModel`. \r\n", - "\r\n", + "Note the subtle difference in the model that we instantiate - `EncDecCTCModelBPE` instead of `EncDecCTCModel`. \n", + "\n", "`EncDecCTCModelBPE` is nearly identical to `EncDecCTCModel` (it is in fact a subclass!); it simply adds support for subword tokenization." ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "YVNc9IxdwXp7" }, + "outputs": [], "source": [ "first_asr_model = nemo_asr.models.EncDecCTCModelBPE(cfg=params.model, trainer=trainer)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", "metadata": { "id": "gJd4gE1uzCuO" }, "source": [ - "### Training: Monitoring Progress\r\n", + "### Training: Monitoring Progress\n", "We can now start Tensorboard to see how training went. Recall that WER stands for Word Error Rate, and so the lower it is, the better." ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "50qMnqagy8VM" }, + "outputs": [], "source": [ "try:\n", " from google import colab\n", " COLAB_ENV = True\n", "except (ImportError, ModuleNotFoundError):\n", " COLAB_ENV = False\n", "\n", "# Load the TensorBoard notebook extension\n", "if COLAB_ENV:\n", " %load_ext tensorboard\n", " %tensorboard --logdir lightning_logs/\n", "else:\n", " print(\"To use tensorboard, please use this notebook in a Google Colab environment.\")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", "metadata": { @@ -861,15 +845,15 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "_iFfkFBTryQn" }, + "outputs": [], "source": [ - "# Start training!!!\r\n", + "# Start training!!!\n", "trainer.fit(first_asr_model)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", "metadata": { "id": "HQ2aSenF90hs" }, "source": [ - "Save the model easily along with the tokenizer using `save_to`. \r\n", - "\r\n", + "Save the model easily along with the tokenizer using `save_to`. \n", + "\n", "Later, we can use `restore_from` to restore the model; it will also reinitialize the tokenizer!" ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "6idt0dfO9z-S" }, + "outputs": [], "source": [ "first_asr_model.save_to(\"first_model.nemo\")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "RpHwCTk1-q4t" }, + "outputs": [], "source": [ "!ls -l -- *.nemo" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", "metadata": { "id": "VIupynXOxODi" }, "source": [ - "There we go! 
We've put together a full training pipeline for the model and trained it for 50 epochs.\n", + "\n", "If you'd like to save this model checkpoint for loading later (e.g. for fine-tuning, or for continuing training), you can simply call `first_asr_model.save_to()`. Then, to restore your weights, you can rebuild the model using the config (let's say you call it `first_asr_model_continued` this time) and call `first_asr_model_continued.restore_from()`." ] }, { "cell_type": "markdown", "metadata": { @@ -926,14 +910,14 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "wLR7PfEzxbO1" }, + "outputs": [], "source": [ "print(params.model.optim)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", "metadata": { "id": "7wfmZWf-xlNV" }, "source": [ - "### After training and hyper parameter tuning\r\n", - "\r\n", + "### After training and hyperparameter tuning\n", + "\n", "Let's say we wanted to change the learning rate. To do so, we can create a `new_opt` dict and set our desired learning rate, then call `.setup_optimization()` with the new optimization parameters." ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "cH31LyZwxi_p" }, + "outputs": [], "source": [ - "import copy\r\n", - "new_opt = copy.deepcopy(params.model.optim)\r\n", - "new_opt.lr = 0.1\r\n", - "first_asr_model.setup_optimization(optim_config=new_opt);\r\n", + "import copy\n", + "new_opt = copy.deepcopy(params.model.optim)\n", + "new_opt.lr = 0.1\n", + "first_asr_model.setup_optimization(optim_config=new_opt);\n", "# And then you can invoke trainer.fit(first_asr_model)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", "metadata": { "id": "azH7U-K8x0rd" }, "source": [ - "## Inference\r\n", - "\r\n", - "Let's have a quick look at how one could run inference with NeMo's ASR model.\r\n", - "\r\n", + "## Inference\n", + "\n", + "Let's have a quick look at how one could run inference with NeMo's ASR model.\n", + "\n", "First, ``EncDecCTCModelBPE`` and its subclasses contain a handy ``transcribe`` method, which can be used to simply obtain audio files' transcriptions. It also has a `batch_size` argument to improve performance."
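The next cell transcribes four hand-picked files. As a hedged variation, assuming only what this tutorial has already established (`first_asr_model`, the `test_manifest` path, and one JSON object per manifest line with an `audio_filepath` key), you could just as well drive `transcribe` straight from the manifest:

```python
# Sketch: collect audio paths from the test manifest and transcribe a few of them.
import json

with open(test_manifest, 'r') as f:
    audio_files = [json.loads(line)['audio_filepath'] for line in f]

# A larger batch_size generally means higher throughput, at the cost of more memory.
print(first_asr_model.transcribe(paths2audio_files=audio_files[:8], batch_size=4))
```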
] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "O64yk8C4xvTG" }, + "outputs": [], "source": [ "print(first_asr_model.transcribe(paths2audio_files=[data_dir + '/an4/wav/an4_clstk/mgah/cen2-mgah-b.wav',\n", " data_dir + '/an4/wav/an4_clstk/fmjd/cen7-fmjd-b.wav',\n", " data_dir + '/an4/wav/an4_clstk/fmjd/cen8-fmjd-b.wav',\n", " data_dir + '/an4/wav/an4_clstk/fkai/cen8-fkai-b.wav'],\n", " batch_size=4))" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1000,50 +984,50 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "Eo2TcBkozlEG" }, + "outputs": [], "source": [ - "# Bigger batch-size = bigger throughput\r\n", - "params['model']['validation_ds']['batch_size'] = 16\r\n", - "\r\n", - "# Setup the test data loader and make sure the model is on GPU\r\n", - "first_asr_model.setup_test_data(test_data_config=params['model']['validation_ds'])\r\n", - "first_asr_model.cuda()\r\n", - "first_asr_model.eval()\r\n", - "\r\n", - "# We remove some preprocessing artifacts which benefit training\r\n", - "first_asr_model.preprocessor.featurizer.pad_to = 0\r\n", - "first_asr_model.preprocessor.featurizer.dither = 0.0\r\n", - "\r\n", - "# We will be computing Word Error Rate (WER) metric between our hypothesis and predictions.\r\n", - "# WER is computed as numerator/denominator.\r\n", - "# We'll gather all the test batches' numerators and denominators.\r\n", - "wer_nums = []\r\n", - "wer_denoms = []\r\n", - "\r\n", - "# Loop over all test batches.\r\n", - "# Iterating over the model's `test_dataloader` will give us:\r\n", - "# (audio_signal, audio_signal_length, transcript_tokens, transcript_length)\r\n", - "# See the AudioToCharDataset for more details.\r\n", - "for test_batch in first_asr_model.test_dataloader():\r\n", - " test_batch = [x.cuda() for x in test_batch]\r\n", - " targets = test_batch[2]\r\n", - " targets_lengths = test_batch[3] \r\n", - " log_probs, encoded_len, greedy_predictions = first_asr_model(\r\n", - " input_signal=test_batch[0], input_signal_length=test_batch[1]\r\n", - " )\r\n", - " # Notice the model has a helper object to compute WER\r\n", - " first_asr_model._wer.update(greedy_predictions, targets, targets_lengths)\r\n", - " _, wer_num, wer_denom = first_asr_model._wer.compute()\r\n", - " wer_nums.append(wer_num.detach().cpu().numpy())\r\n", - " wer_denoms.append(wer_denom.detach().cpu().numpy())\r\n", - "\r\n", - "# We need to sum all numerators and denominators first. 
Then divide.\r\n", + "# Bigger batch-size = bigger throughput\n", + "params['model']['validation_ds']['batch_size'] = 16\n", + "\n", + "# Setup the test data loader and make sure the model is on GPU\n", + "first_asr_model.setup_test_data(test_data_config=params['model']['validation_ds'])\n", + "first_asr_model.cuda()\n", + "first_asr_model.eval()\n", + "\n", + "# We remove some preprocessing artifacts which benefit training\n", + "first_asr_model.preprocessor.featurizer.pad_to = 0\n", + "first_asr_model.preprocessor.featurizer.dither = 0.0\n", + "\n", + "# We will be computing Word Error Rate (WER) metric between our hypothesis and predictions.\n", + "# WER is computed as numerator/denominator.\n", + "# We'll gather all the test batches' numerators and denominators.\n", + "wer_nums = []\n", + "wer_denoms = []\n", + "\n", + "# Loop over all test batches.\n", + "# Iterating over the model's `test_dataloader` will give us:\n", + "# (audio_signal, audio_signal_length, transcript_tokens, transcript_length)\n", + "# See the AudioToCharDataset for more details.\n", + "for test_batch in first_asr_model.test_dataloader():\n", + " test_batch = [x.cuda() for x in test_batch]\n", + " targets = test_batch[2]\n", + " targets_lengths = test_batch[3] \n", + " log_probs, encoded_len, greedy_predictions = first_asr_model(\n", + " input_signal=test_batch[0], input_signal_length=test_batch[1]\n", + " )\n", + " # Notice the model has a helper object to compute WER\n", + " first_asr_model._wer.update(greedy_predictions, targets, targets_lengths)\n", + " _, wer_num, wer_denom = first_asr_model._wer.compute()\n", + " wer_nums.append(wer_num.detach().cpu().numpy())\n", + " wer_denoms.append(wer_denom.detach().cpu().numpy())\n", + "\n", + "# We need to sum all numerators and denominators first. Then divide.\n", "print(f\"WER = {sum(wer_nums)/sum(wer_denoms)}\")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1060,22 +1044,22 @@ "id": "dtl9vEhx3MG7" }, "source": [ - "## Utilizing the underlying tokenizer\r\n", - "\r\n", + "## Utilizing the underlying tokenizer\n", + "\n", "Since the model has an underlying tokenizer, it would be nice to use it externally as well - say for getting the subwords of the transcript or to tokenize a dataset using the same tokenizer as the ASR model." ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "fdXg21if2YRp" }, + "outputs": [], "source": [ - "tokenizer = first_asr_model.tokenizer\r\n", + "tokenizer = first_asr_model.tokenizer\n", "tokenizer" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1083,22 +1067,22 @@ "id": "Y96SOqpJ3kG3" }, "source": [ - "You can get the tokenizer's vocabulary using the `tokenizer.tokenizer.get_vocab()` method. \r\n", - "\r\n", + "You can get the tokenizer's vocabulary using the `tokenizer.tokenizer.get_vocab()` method. \n", + "\n", "ASR tokenizers will map the subword to an integer index in the vocabulary for convenience." 
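Beyond inspecting the vocabulary, the same id-to-text machinery can decode model outputs. As a purely illustrative sketch (NeMo's own decoding utilities handle this for you, and the blank index here is an assumption: greedy CTC decoding conventionally reserves one extra class beyond the vocabulary), the collapse step looks like this:

```python
# Illustrative only: standard greedy CTC post-processing.
# Assumption: `blank_id` is the extra class appended after the subword vocabulary.
def ctc_collapse(ids, blank_id):
    out, prev = [], None
    for i in ids:
        if i != prev and i != blank_id:  # skip repeated ids and blank ids
            out.append(i)
        prev = i
    return out

# e.g. tokenizer.ids_to_text(ctc_collapse(predicted_ids, blank_id))
```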
] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "F56_tIRM3g3f" }, + "outputs": [], "source": [ - "vocab = tokenizer.tokenizer.get_vocab()\r\n", + "vocab = tokenizer.tokenizer.get_vocab()\n", "vocab" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1111,51 +1095,51 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "-2tMVskF3uUf" }, + "outputs": [], "source": [ - "tokens = tokenizer.text_to_tokens(\"hello world\")\r\n", + "tokens = tokenizer.text_to_tokens(\"hello world\")\n", "tokens" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "CkxHkKQn4Q-E" }, + "outputs": [], "source": [ - "token_ids = tokenizer.text_to_ids(\"hello world\")\r\n", + "token_ids = tokenizer.text_to_ids(\"hello world\")\n", "token_ids" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "tpdoIrRt4Xim" }, + "outputs": [], "source": [ - "subwords = tokenizer.ids_to_tokens(token_ids)\r\n", + "subwords = tokenizer.ids_to_tokens(token_ids)\n", "subwords" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "wudNyONi4og8" }, + "outputs": [], "source": [ - "text = tokenizer.ids_to_text(token_ids)\r\n", + "text = tokenizer.ids_to_text(token_ids)\n", "text" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1163,29 +1147,29 @@ "id": "E35VBsbf4yWy" }, "source": [ - "## Model Improvements\r\n", - "\r\n", - "You already have all you need to create your own ASR model in NeMo, but there are a few more tricks that you can employ if you so desire. In this section, we'll briefly cover a few possibilities for improving an ASR model.\r\n", - "\r\n", - "### Data Augmentation\r\n", - "\r\n", - "There exist several ASR data augmentation methods that can increase the size of our training set.\r\n", - "\r\n", - "For example, we can perform augmentation on the spectrograms by zeroing out specific frequency segments (\"frequency masking\") or time segments (\"time masking\") as described by [SpecAugment](https://arxiv.org/abs/1904.08779), or zero out rectangles on the spectrogram as in [Cutout](https://arxiv.org/pdf/1708.04552.pdf). In NeMo, we can do all three of these by simply adding a `SpectrogramAugmentation` neural module. (As of now, it does not perform the time warping from the SpecAugment paper.)\r\n", - "\r\n", + "## Model Improvements\n", + "\n", + "You already have all you need to create your own ASR model in NeMo, but there are a few more tricks that you can employ if you so desire. In this section, we'll briefly cover a few possibilities for improving an ASR model.\n", + "\n", + "### Data Augmentation\n", + "\n", + "There exist several ASR data augmentation methods that can increase the size of our training set.\n", + "\n", + "For example, we can perform augmentation on the spectrograms by zeroing out specific frequency segments (\"frequency masking\") or time segments (\"time masking\") as described by [SpecAugment](https://arxiv.org/abs/1904.08779), or zero out rectangles on the spectrogram as in [Cutout](https://arxiv.org/pdf/1708.04552.pdf). In NeMo, we can do all three of these by simply adding a `SpectrogramAugmentation` neural module. 
(As of now, it does not perform the time warping from the SpecAugment paper.)\n", + "\n", "Our toy model disables spectrogram augmentation, because it is not significantly beneficial for the short demo." ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "SMi6Bauy4Jhg" }, + "outputs": [], "source": [ "print(OmegaConf.to_yaml(first_asr_model._cfg['spec_augment']))" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1202,12 +1186,12 @@ "id": "fDTC4fXZ5QnT" }, "source": [ - "### Transfer learning\r\n", - "\r\n", - "Transfer learning is an important machine learning technique that uses a model’s knowledge of one task to perform better on another. Fine-tuning is one of the techniques to perform transfer learning. It is an essential part of the recipe for many state-of-the-art results where a base model is first pretrained on a task with abundant training data and then fine-tuned on different tasks of interest where the training data is less abundant or even scarce.\r\n", - "\r\n", - "In ASR you might want to do fine-tuning in multiple scenarios, for example, when you want to improve your model's performance on a particular domain (medical, financial, etc.) or accented speech. You can even transfer learn from one language to another! Check out [this paper](https://arxiv.org/abs/2005.04290) for examples.\r\n", - "\r\n", + "### Transfer learning\n", + "\n", + "Transfer learning is an important machine learning technique that uses a model’s knowledge of one task to perform better on another. Fine-tuning is one of the techniques to perform transfer learning. It is an essential part of the recipe for many state-of-the-art results where a base model is first pretrained on a task with abundant training data and then fine-tuned on different tasks of interest where the training data is less abundant or even scarce.\n", + "\n", + "In ASR you might want to do fine-tuning in multiple scenarios, for example, when you want to improve your model's performance on a particular domain (medical, financial, etc.) or accented speech. You can even transfer learn from one language to another! Check out [this paper](https://arxiv.org/abs/2005.04290) for examples.\n", + "\n", "Transfer learning with NeMo is simple. Let's demonstrate how we could fine-tune the model we trained earlier on AN4 data. (NOTE: this is a toy example). And, while we are at it, we will change the model's vocabulary to demonstrate how it's done." ] }, @@ -1217,15 +1201,17 @@ "id": "IN0LbDbY5YR1" }, "source": [ - "-----\r\n", + "-----\n", "First, let's create another tokenizer - perhaps using a larger vocabulary size than the small tokenizer we created earlier. Also we swap out `sentencepiece` for `BERT Word Piece` tokenizer." 
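Once the next cell has run, a quick hedged way to confirm where the new tokenizer landed (both directory names below are taken from paths used elsewhere in this tutorial; the exact listing depends on the tokenizer library):

```python
# Sketch: after building the WPE tokenizer below, both tokenizer directories
# should sit side by side under the shared data_root.
import os

print(os.listdir(data_dir + "/tokenizers/an4/"))
# Expected (roughly): ['tokenizer_spe_unigram_v32', 'tokenizer_wpe_v64', ...]
```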
] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "LFENXcXw48fc" }, + "outputs": [], "source": [ "!python ./scripts/process_asr_text_tokenizer.py \\\n", " --manifest=\"{data_dir}/an4/train_manifest.json\" \\\n", " --data_root=\"{data_dir}/tokenizers/an4\" \\\n", " --tokenizer=\"wpe\" \\\n", " --no_lower_case \\\n", " --log" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", "metadata": { @@ -1249,14 +1233,14 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "QtyAB9fQ_qbj" }, + "outputs": [], "source": [ "restored_model = nemo_asr.models.EncDecCTCModelBPE.restore_from(\"./first_model.nemo\")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", "metadata": { @@ -1269,9 +1253,11 @@ }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "4Ey9CUkJ5o56" }, + "outputs": [], "source": [ "# Check what kind of vocabulary/alphabet the model has right now\n", "print(restored_model.decoder.vocabulary)\n", "\n", "# Lets change the tokenizer vocabulary by passing the path to the new directory,\n", "# and also change the type\n", "restored_model.change_vocabulary(\n", " new_tokenizer_dir=data_dir + \"/tokenizers/an4/tokenizer_wpe_v64/\",\n", " new_tokenizer_type=\"wpe\"\n", ")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", "metadata": { "id": "UZ3sf2P26SiA" }, "source": [ - "After this, our decoder has completely changed, but our encoder (where most of the weights are) remained intact. Let's fine tune-this model for 20 epochs on AN4 dataset. We will also use the smaller learning rate from ``new_opt` (see the \"After Training\" section)`.\r\n", - "\r\n", + "After this, our decoder has completely changed, but our encoder (where most of the weights are) remained intact. Let's fine-tune this model for 20 epochs on the AN4 dataset. We will also use the smaller learning rate from `new_opt` (see the \"After training and hyperparameter tuning\" section).\n", + "\n", "**Note**: For this demonstration, we will also freeze the encoder to speed up fine-tuning (since both tokenizers are built on the same train set), but in general it should not be done for proper training on a new language (or on a different corpus than the original training corpus)."
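The next cell performs the freeze. A small hedged sanity check, using plain PyTorch rather than any NeMo-specific API, is to count trainable parameters before and after:

```python
# Sketch: a frozen module should report zero trainable parameters.
def num_trainable(module):
    return sum(p.numel() for p in module.parameters() if p.requires_grad)

# After restored_model.encoder.freeze() in the next cell, expect something like:
# print(num_trainable(restored_model.encoder))  # -> 0
# print(num_trainable(restored_model.decoder))  # -> still non-zero
```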
] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "7m_CRtH46BjO" }, + "outputs": [], "source": [ - "# Use the smaller learning rate we set before\r\n", - "restored_model.setup_optimization(optim_config=new_opt)\r\n", - "\r\n", - "# Point to the data we'll use for fine-tuning as the training set\r\n", - "restored_model.setup_training_data(train_data_config=params['model']['train_ds'])\r\n", - "\r\n", - "# Point to the new validation data for fine-tuning\r\n", - "restored_model.setup_validation_data(val_data_config=params['model']['validation_ds'])\r\n", - "\r\n", - "# Freeze the encoder layers (should not be done for finetuning, only done for demo)\r\n", + "# Use the smaller learning rate we set before\n", + "restored_model.setup_optimization(optim_config=new_opt)\n", + "\n", + "# Point to the data we'll use for fine-tuning as the training set\n", + "restored_model.setup_training_data(train_data_config=params['model']['train_ds'])\n", + "\n", + "# Point to the new validation data for fine-tuning\n", + "restored_model.setup_validation_data(val_data_config=params['model']['validation_ds'])\n", + "\n", + "# Freeze the encoder layers (should not be done for finetuning, only done for demo)\n", "restored_model.encoder.freeze()" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "uCmUWZLD63d9" }, + "outputs": [], "source": [ - "# Load the TensorBoard notebook extension\r\n", - "if COLAB_ENV:\r\n", - " %load_ext tensorboard\r\n", - " %tensorboard --logdir lightning_logs/\r\n", - "else:\r\n", + "# Load the TensorBoard notebook extension\n", + "if COLAB_ENV:\n", + " %load_ext tensorboard\n", + " %tensorboard --logdir lightning_logs/\n", + "else:\n", " print(\"To use tensorboard, please use this notebook in a Google Colab environment.\")" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, "metadata": { "id": "fs2aK7xB6pAd" }, + "outputs": [], "source": [ - "# And now we can create a PyTorch Lightning trainer and call `fit` again.\r\n", - "trainer = pl.Trainer(devices=1, accelerator='gpu', max_epochs=20)\r\n", + "# And now we can create a PyTorch Lightning trainer and call `fit` again.\n", + "trainer = pl.Trainer(devices=1, accelerator='gpu', max_epochs=20)\n", "trainer.fit(restored_model)" - ], - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", @@ -1362,22 +1346,22 @@ "id": "alykABQ3CNpf" }, "source": [ - "### Fast Training\r\n", - "\r\n", - "Last but not least, we could simply speed up training our model! If you have the resources, you can speed up training by splitting the workload across multiple GPUs. Otherwise (or in addition), there's always mixed precision training, which allows you to increase your batch size.\r\n", - "\r\n", - "You can use [PyTorch Lightning's Trainer object](https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html?highlight=Trainer) to handle mixed-precision and distributed training for you. 
Below are some examples of flags you would pass to the `Trainer` to use these features:\r\n", - "\r\n", - "```python\r\n", - "# Mixed precision:\r\n", - "trainer = pl.Trainer(amp_level='O1', precision=16)\r\n", - "\r\n", - "# Trainer with a distributed backend:\r\n", - "trainer = pl.Trainer(devices=2, num_nodes=2, accelerator='gpu', strategy='dp')\r\n", - "\r\n", - "# Of course, you can combine these flags as well.\r\n", - "```\r\n", - "\r\n", + "### Fast Training\n", + "\n", + "Last but not least, we could simply speed up training our model! If you have the resources, you can speed up training by splitting the workload across multiple GPUs. Otherwise (or in addition), there's always mixed precision training, which allows you to increase your batch size.\n", + "\n", + "You can use [PyTorch Lightning's Trainer object](https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html?highlight=Trainer) to handle mixed-precision and distributed training for you. Below are some examples of flags you would pass to the `Trainer` to use these features:\n", + "\n", + "```python\n", + "# Mixed precision:\n", + "trainer = pl.Trainer(amp_level='O1', precision=16)\n", + "\n", + "# Trainer with a distributed backend:\n", + "trainer = pl.Trainer(devices=2, num_nodes=2, accelerator='gpu', strategy='dp')\n", + "\n", + "# Of course, you can combine these flags as well.\n", + "```\n", + "\n", "Finally, have a look at [example scripts in NeMo repository](https://github.com/NVIDIA/NeMo/blob/stable/examples/asr/asr_ctc/speech_to_text_ctc_bpe.py) which can handle mixed precision and distributed training using command-line arguments." ] }, @@ -1387,33 +1371,33 @@ "id": "4uQGWtRJDF0O" }, "source": [ - "## Under the Hood\r\n", - "\r\n", - "NeMo is open-source and we do all our model development in the open, so you can inspect our code if you wish.\r\n", - "\r\n", - "In particular, ``nemo_asr.model.EncDecCTCModelBPE`` is an encoder-decoder model which is constructed using several ``Neural Modules`` taken from ``nemo_asr.modules.`` Here is what its forward pass looks like:\r\n", - "```python\r\n", - "def forward(self, input_signal, input_signal_length):\r\n", - " processed_signal, processed_signal_len = self.preprocessor(\r\n", - " input_signal=input_signal, length=input_signal_length,\r\n", - " )\r\n", - " # Spec augment is not applied during evaluation/testing\r\n", - " if self.spec_augmentation is not None and self.training:\r\n", - " processed_signal = self.spec_augmentation(input_spec=processed_signal)\r\n", - " encoded, encoded_len = self.encoder(audio_signal=processed_signal, length=processed_signal_len)\r\n", - " log_probs = self.decoder(encoder_output=encoded)\r\n", - " greedy_predictions = log_probs.argmax(dim=-1, keepdim=False)\r\n", - " return log_probs, encoded_len, greedy_predictions\r\n", - "```\r\n", - "Here:\r\n", - "\r\n", - "* ``self.preprocessor`` is an instance of ``nemo_asr.modules.AudioToMelSpectrogramPreprocessor``, which is a neural module that takes audio signal and converts it into a Mel-Spectrogram\r\n", - "* ``self.spec_augmentation`` - is a neural module of type ```nemo_asr.modules.SpectrogramAugmentation``, which implements data augmentation. 
\r\n", - "* ``self.encoder`` - is a convolutional Jasper, QuartzNet or Citrinet-like encoder of type ``nemo_asr.modules.ConvASREncoder``\r\n", - "* ``self.decoder`` - is a ``nemo_asr.modules.ConvASRDecoder`` which simply projects into the target alphabet (vocabulary).\r\n", - "\r\n", - "Also, ``EncDecCTCModelBPE`` uses the audio dataset class ``nemo_asr.data.AudioToBPEDataset`` and CTC loss implemented in ``nemo_asr.losses.CTCLoss``.\r\n", - "\r\n", + "## Under the Hood\n", + "\n", + "NeMo is open-source and we do all our model development in the open, so you can inspect our code if you wish.\n", + "\n", + "In particular, ``nemo_asr.model.EncDecCTCModelBPE`` is an encoder-decoder model which is constructed using several ``Neural Modules`` taken from ``nemo_asr.modules.`` Here is what its forward pass looks like:\n", + "```python\n", + "def forward(self, input_signal, input_signal_length):\n", + " processed_signal, processed_signal_len = self.preprocessor(\n", + " input_signal=input_signal, length=input_signal_length,\n", + " )\n", + " # Spec augment is not applied during evaluation/testing\n", + " if self.spec_augmentation is not None and self.training:\n", + " processed_signal = self.spec_augmentation(input_spec=processed_signal)\n", + " encoded, encoded_len = self.encoder(audio_signal=processed_signal, length=processed_signal_len)\n", + " log_probs = self.decoder(encoder_output=encoded)\n", + " greedy_predictions = log_probs.argmax(dim=-1, keepdim=False)\n", + " return log_probs, encoded_len, greedy_predictions\n", + "```\n", + "Here:\n", + "\n", + "* ``self.preprocessor`` is an instance of ``nemo_asr.modules.AudioToMelSpectrogramPreprocessor``, which is a neural module that takes audio signal and converts it into a Mel-Spectrogram\n", + "* ``self.spec_augmentation`` - is a neural module of type ```nemo_asr.modules.SpectrogramAugmentation``, which implements data augmentation. \n", + "* ``self.encoder`` - is a convolutional Jasper, QuartzNet or Citrinet-like encoder of type ``nemo_asr.modules.ConvASREncoder``\n", + "* ``self.decoder`` - is a ``nemo_asr.modules.ConvASRDecoder`` which simply projects into the target alphabet (vocabulary).\n", + "\n", + "Also, ``EncDecCTCModelBPE`` uses the audio dataset class ``nemo_asr.data.AudioToBPEDataset`` and CTC loss implemented in ``nemo_asr.losses.CTCLoss``.\n", + "\n", "You can use these and other neural modules (or create new ones yourself!) to construct new ASR models." ] }, @@ -1423,23 +1407,39 @@ "id": "5kKcSb7LDdI3" }, "source": [ - "# Further Reading/Watching:\r\n", - "\r\n", - "That's all for now! 
If you'd like to learn more about the topics covered in this tutorial, here are some resources that may interest you:\r\n", - "- [Stanford Lecture on ASR](https://www.youtube.com/watch?v=3MjIkWxXigM)\r\n", - "- [\"An Intuitive Explanation of Connectionist Temporal Classification\"](https://towardsdatascience.com/intuitively-understanding-connectionist-temporal-classification-3797e43a86c)\r\n", - "- [Explanation of CTC with Prefix Beam Search](https://medium.com/corti-ai/ctc-networks-and-language-models-prefix-beam-search-explained-c11d1ee23306)\r\n", - "- [Byte Pair Encoding](https://arxiv.org/abs/1508.07909)\r\n", - "- [Word Piece Encoding](https://static.googleusercontent.com/media/research.google.com/ja//pubs/archive/37842.pdf)\r\n", - "- [SentencePiece: A simple and language independent subword tokenizer and detokenizer for Neural Text Processing](https://www.aclweb.org/anthology/D18-2012/)\r\n", - "- [Jasper Paper](https://arxiv.org/abs/1904.03288)\r\n", - "- [QuartzNet paper](https://arxiv.org/abs/1910.10261)\r\n", - "- [SpecAugment Paper](https://arxiv.org/abs/1904.08779)\r\n", - "- [Explanation and visualization of SpecAugment](https://towardsdatascience.com/state-of-the-art-audio-data-augmentation-with-google-brains-specaugment-and-pytorch-d3d1a3ce291e)\r\n", - "- [Cutout Paper](https://arxiv.org/pdf/1708.04552.pdf)\r\n", - "- [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507)\r\n", + "# Further Reading/Watching:\n", + "\n", + "That's all for now! If you'd like to learn more about the topics covered in this tutorial, here are some resources that may interest you:\n", + "- [Stanford Lecture on ASR](https://www.youtube.com/watch?v=3MjIkWxXigM)\n", + "- [\"An Intuitive Explanation of Connectionist Temporal Classification\"](https://towardsdatascience.com/intuitively-understanding-connectionist-temporal-classification-3797e43a86c)\n", + "- [Explanation of CTC with Prefix Beam Search](https://medium.com/corti-ai/ctc-networks-and-language-models-prefix-beam-search-explained-c11d1ee23306)\n", + "- [Byte Pair Encoding](https://arxiv.org/abs/1508.07909)\n", + "- [Word Piece Encoding](https://static.googleusercontent.com/media/research.google.com/ja//pubs/archive/37842.pdf)\n", + "- [SentencePiece: A simple and language independent subword tokenizer and detokenizer for Neural Text Processing](https://www.aclweb.org/anthology/D18-2012/)\n", + "- [Jasper Paper](https://arxiv.org/abs/1904.03288)\n", + "- [QuartzNet paper](https://arxiv.org/abs/1910.10261)\n", + "- [SpecAugment Paper](https://arxiv.org/abs/1904.08779)\n", + "- [Explanation and visualization of SpecAugment](https://towardsdatascience.com/state-of-the-art-audio-data-augmentation-with-google-brains-specaugment-and-pytorch-d3d1a3ce291e)\n", + "- [Cutout Paper](https://arxiv.org/pdf/1708.04552.pdf)\n", + "- [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507)\n", "- [Transfer Learning Blogpost](https://developer.nvidia.com/blog/jump-start-training-for-speech-recognition-models-with-nemo/)" ] } - ] -} \ No newline at end of file + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "name": "ASR_with_Subword_Tokenization.ipynb", + "provenance": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/tutorials/asr/ASR_with_Transducers.ipynb b/tutorials/asr/ASR_with_Transducers.ipynb index b59153517558..f0efdf1cb363 100644 --- 
a/tutorials/asr/ASR_with_Transducers.ipynb +++ b/tutorials/asr/ASR_with_Transducers.ipynb @@ -28,7 +28,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "## Grab the config we'll use in this example\n", diff --git a/tutorials/asr/Buffered_Transducer_Inference.ipynb b/tutorials/asr/Buffered_Transducer_Inference.ipynb index 939355de6368..2d42749524d9 100644 --- a/tutorials/asr/Buffered_Transducer_Inference.ipynb +++ b/tutorials/asr/Buffered_Transducer_Inference.ipynb @@ -27,7 +27,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "# Update numba and restart (this is required to update internal numba version of Colab)\n", diff --git a/tutorials/asr/Buffered_Transducer_Inference_with_LCS_Merge.ipynb b/tutorials/asr/Buffered_Transducer_Inference_with_LCS_Merge.ipynb index eb4676b6e01f..5c00335d539f 100644 --- a/tutorials/asr/Buffered_Transducer_Inference_with_LCS_Merge.ipynb +++ b/tutorials/asr/Buffered_Transducer_Inference_with_LCS_Merge.ipynb @@ -1,22 +1,4 @@ { - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "name": "Buffered_Transducer_Inference_with_LCS_Merge.ipynb", - "provenance": [], - "collapsed_sections": [], - "toc_visible": true - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "language_info": { - "name": "python" - }, - "accelerator": "GPU" - }, "cells": [ { "cell_type": "code", @@ -45,7 +27,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "# Update numba and restart (this is required to update internal numba version of Colab)\n", @@ -69,6 +51,9 @@ }, { "cell_type": "markdown", + "metadata": { + "id": "cPuPBSU0ioJO" + }, "source": [ "# Buffered Transducer evaluation with Longest Common Subsequence Merge\n", "\n", @@ -81,48 +66,51 @@ "-----\n", "\n", "You may use this script [ASR Chunked Streaming Inference](https://github.com/NVIDIA/NeMo/blob/stable/examples/asr/asr_chunked_inference/rnnt/speech_to_text_buffered_infer_rnnt.py) to transcribe long audio files with Transducer models as well as experiment with both merge algorithms. \n" - ], - "metadata": { - "id": "cPuPBSU0ioJO" - } + ] }, { "cell_type": "markdown", + "metadata": { + "id": "ylQ3GwvX-n7R" + }, "source": [ "------\n", "\n", "**Note**: It is highly recommended to review the ``Streaming ASR`` tutorial for a good overview of how streaming/buffered inference works for CTC models and the underlying motivation of streaming ASR itself.\n", "\n", "------" - ], - "metadata": { - "id": "ylQ3GwvX-n7R" - } + ] }, { "cell_type": "markdown", + "metadata": { + "id": "2eDAsjyCi3lc" + }, "source": [ "# Prepare the dataset\n", "\n", "We will reuse the Librispeech dev-clean subset of [Mini Librispeech](https://www.openslr.org/31/). This time, we will not concatenate the audio segments but simply evaluate them in buffered mode over all the audio samples.\n", "\n", "**Note**: Conformer inference over the entire dev set will take an exorbitant amount of time on the CPU. We recommend the use of GPU for this tutorial." 
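To set expectations before committing to a long evaluation run, a hedged helper may be useful (assuming the manifest produced below follows the standard NeMo format, with a `duration` field on every line):

```python
# Sketch: total audio duration gives a rough feel for how long buffered
# inference will take on your hardware.
import json

def total_audio_hours(manifest_path):
    with open(manifest_path, 'r') as f:
        seconds = sum(json.loads(line)['duration'] for line in f)
    return seconds / 3600.0

# e.g. print(f"{total_audio_hours(manifest):.2f} hours")  # `manifest` is defined below
```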
- ], - "metadata": { - "id": "2eDAsjyCi3lc" - } + ] }, { "cell_type": "markdown", - "source": [ - "## Download and prepare Mini Librispeech" - ], "metadata": { "id": "fBYvC3lyjM7O" - } + }, + "source": [ + "## Download and prepare Mini Librispeech" + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", + "id": "LBiTnpz6iket" + }, + "outputs": [], "source": [ "#@title Prepare dataset and manifest for Libripeech Dev Clean subset.\n", "import os\n", @@ -139,39 +127,38 @@ " --data_sets dev_clean_2 \\\n", " --num_workers=10 \\\n", " --log" - ], - "metadata": { - "cellView": "form", - "id": "LBiTnpz6iket" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", - "source": [ - "manifest = os.path.join(os.getcwd(), \"datasets/mini-dev-clean/dev_clean_2.json\")\n", - "print(\"Manifest path :\", manifest)" - ], + "execution_count": null, "metadata": { "id": "KHcy1Jbx8d9V" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "manifest = os.path.join(os.getcwd(), \"datasets/mini-dev-clean/dev_clean_2.json\")\n", + "print(\"Manifest path :\", manifest)" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "8g61qBwgkHiw" + }, "source": [ "# Prepare the model\n", "\n", "We will use the same Conformer Transducer model used in the `Buffered Transducer Inference` tutorial, which will provide a fair comparison between the proposed merge algorithms described here." - ], - "metadata": { - "id": "8g61qBwgkHiw" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "j9UHfsR1j-uf" + }, + "outputs": [], "source": [ "import torch\n", "import nemo.collections.asr as nemo_asr\n", @@ -189,26 +176,26 @@ "\n", "device = 'cuda' if torch.cuda.is_available() else 'cpu'\n", "device" - ], - "metadata": { - "id": "j9UHfsR1j-uf" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", - "source": [ - "pretrained_model_name = \"stt_en_conformer_transducer_large\"" - ], + "execution_count": null, "metadata": { "id": "CzkoimqKl07U" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "pretrained_model_name = \"stt_en_conformer_transducer_large\"" + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "0LjtehkvlvKE" + }, + "outputs": [], "source": [ "# Clear up memory\n", "torch.cuda.empty_cache()\n", @@ -218,15 +205,13 @@ "# device = 'cpu' # You can transcribe even longer samples on the CPU, though it will take much longer !\n", "model = model.to(device)\n", "model.freeze()" - ], - "metadata": { - "id": "0LjtehkvlvKE" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", + "metadata": { + "id": "OPZqcbNEnRkI" + }, "source": [ "# Longest Common Subsequence Merge\n", "\n", @@ -235,13 +220,15 @@ "In contrast to the `Middle Token` algorithm, which utilizes certain seconds of both past and future context in order to determine the \"middle tokens\" for that current buffer, the `LCS Merge` algorithm merges only consecutive buffers by selecting the overlap between the end of the `i-1`th buffer and the beginning of the `i`th buffer sub-word tokens, then removing the overlapped tokens from the `i`th buffer.\n", "\n", "While the idea is simple, since the same text can be represented by a different combination of sub-words, some additional expansion steps must be accounted for to account for imperfect alignment between two buffers." 
- ], - "metadata": { - "id": "OPZqcbNEnRkI" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "pEPLZyJP_zx2" + }, + "outputs": [], "source": [ "### Utility Functions ###\n", "def print_alignment(alignment):\n", @@ -276,15 +263,13 @@ "\n", " extras['alignment'] = alignment\n", " torch.save(extras, filepath)" - ], - "metadata": { - "id": "pEPLZyJP_zx2" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", + "metadata": { + "id": "EwPrhOP2_7D2" + }, "source": [ "## Algorithm overview\n", "\n", @@ -304,13 +289,15 @@ "5. Perform a backward trace of the LCS suffix matrix to find detached sections to know the beginning index of slice and length of slice.\n", "6. Finally, check that beginning index of slice < max number of buffer chunks; if true, then slice off new buffer\n", " " - ], - "metadata": { - "id": "EwPrhOP2_7D2" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "AOBFADPdoJc8" + }, + "outputs": [], "source": [ "# Minimum number of tokens required to assign a LCS merge step, otherwise ignore and\n", "# select all i-1 and ith buffer tokens to merge.\n", @@ -536,15 +523,13 @@ " return result_idx, LCSuff\n", "\n", "\n" - ], - "metadata": { - "id": "AOBFADPdoJc8" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", + "metadata": { + "id": "QeCGszfO_5cI" + }, "source": [ "## Merge Overview\n", "\n", @@ -565,13 +550,15 @@ "5. Slice off the new data (`i`th chunk)\n", "\n", "6. Merge the previous and current subset of the chunk and return the merged buffer." - ], - "metadata": { - "id": "QeCGszfO_5cI" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "_6r78fEm_48d" + }, + "outputs": [], "source": [ "def lcs_alignment_merge_buffer(buffer, data, delay, model, max_steps_per_timestep: int = 5, filepath: str = None):\n", " \"\"\"\n", @@ -606,28 +593,28 @@ " # Concat data to buffer\n", " buffer += data\n", " return buffer" - ], - "metadata": { - "id": "_6r78fEm_48d" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", + "metadata": { + "id": "Bz31XOhLqu3z" + }, "source": [ "# LCS Merge algorithm as a basis for Buffered ASR\n", "\n", "Next, let us extend the previous `BatchedFrameASRRNNT` codebase for Buffered Transducer to incorporate the new merge algorithm.\n", "\n", "We will note that the vast majority of the code remains unchanged - only the `transcribe` function has been changed to utilize the new merge algorithm." - ], - "metadata": { - "id": "Bz31XOhLqu3z" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "wNgmc68nl1Ri" + }, + "outputs": [], "source": [ "from nemo.collections.asr.parts.utils import streaming_utils\n", "from torch.utils.data import DataLoader\n", @@ -721,28 +708,29 @@ " for idx in range(self.batch_size):\n", " output.append(self.greedy_merge(self.unmerged[idx]))\n", " return output\n" - ], - "metadata": { - "id": "wNgmc68nl1Ri" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", + "metadata": { + "id": "0fmD9goyrmEb" + }, "source": [ "# Comparing \"Middle Token\" and \"LCS Merge\"\n", "\n", "While we propose the two algorithms - `Middle Token` and `LCS Merge`, we would recommend using either algorithm in the appropriate circumstances. The `Middle Token` algorithm performs well in general, and its mistakes are often fewer than the `LCS Merge` algorithm but requires future context, which may increase latency by a small amount. 
There are also cases where `LCS Merge` may select better alignments and result in slightly better scores for some audio samples.\n", "\n", "In general, we present both approaches to encourage further discussion and research into merge algorithms, each of which trades off latency against accuracy differently." - ], - "metadata": { - "id": "0fmD9goyrmEb" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", + "id": "FlIGOc_yl4aQ" + }, + "outputs": [], "source": [ "#@title Change Decoding Strategy for Buffered Inference\n", "# Change Decoding Config\n", @@ -755,16 +743,16 @@ " decoding_cfg.fused_batch_size = -1 # temporarily stop fused batch during inference.\n", "\n", "model.change_decoding_strategy(decoding_cfg)" - ], - "metadata": { - "cellView": "form", - "id": "FlIGOc_yl4aQ" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", + "id": "laPBH4eJsiJk" + }, + "outputs": [], "source": [ "#@title Helper methods to transcribe audio in buffered mode\n", "\n", @@ -816,27 +804,27 @@ " \n", " print(\"Finished transcribing audio files\")\n", " return hyps" - ], - "metadata": { - "id": "laPBH4eJsiJk", - "cellView": "form" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", + "metadata": { + "id": "csSNtwziubeM" + }, "source": [ "## Select data subset\n", "\n", "On the GPU, it would take a few minutes to perform inference for the entire dataset, but on the CPU, it would take quite a long time. While the defaults cover the whole dataset, we encourage you to subsample the dataset if only a CPU is available." - ], - "metadata": { - "id": "csSNtwziubeM" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", + "id": "QoZ0fG8zuf5E" + }, + "outputs": [], "source": [ "#@title Manifest helper\n", "import json\n", @@ -868,28 +856,27 @@ " \n", " print(f\"Prepared subset manifest with {len(sub_manifest)} samples.\")\n", " return sub_manifest" - ], - "metadata": { - "cellView": "form", - "id": "QoZ0fG8zuf5E" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", - "source": [ - "manifest_data = read_manifest(manifest)\n", - "print(f\"Read {len(manifest_data)} samples from manifest {manifest}\")" - ], + "execution_count": null, "metadata": { "id": "I8dnQRL6umrO" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "manifest_data = read_manifest(manifest)\n", + "print(f\"Read {len(manifest_data)} samples from manifest {manifest}\")" + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "zZOTSFHsuswe" + }, + "outputs": [], "source": [ "num_samples = len(manifest_data)\n", "\n", @@ -897,26 +884,26 @@ "sub_manifest = subset_manifest(manifest_data, num_samples)\n", "audio_filepaths = [sample['audio_filepath'] for sample in sub_manifest]\n", "ground_texts = [sample['text'] for sample in sub_manifest]" - ], - "metadata": { - "id": "zZOTSFHsuswe" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", + "metadata": { + "id": "GURl8G2Bwlad" + }, "source": [ "## Buffered Inference arguments\n", "\n", "Below we detail some critical arguments for buffered transducer inference. Note that the primary difference between streaming and buffered inference is the chunk length: larger values yield a lower word error rate but higher latency. 
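As a quick sanity check of the buffer geometry these arguments control, the following sketch mirrors the default values used in the next cell (nothing in it is model-specific; the variable names simply match the cell below):

```python
# Buffer geometry for buffered transducer inference (illustrative only).
chunk_len_in_secs = 8.0    # new audio consumed at each step
context_len_in_secs = 1.0  # past/future context on either side of the chunk

# Each buffer spans the chunk plus context on both sides ...
buffer_len_in_secs = chunk_len_in_secs + 2 * context_len_in_secs  # 10.0 s

# ... so consecutive buffers overlap by twice the context length.
overlap_in_secs = buffer_len_in_secs - chunk_len_in_secs  # 2.0 s
print(buffer_len_in_secs, overlap_in_secs)
```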
" - ], - "metadata": { - "id": "GURl8G2Bwlad" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "gF86J9Knwpe_" + }, + "outputs": [], "source": [ "chunk_len_in_secs: float = 8.0\n", "context_len_in_secs: float = 1.0\n", @@ -928,123 +915,123 @@ " \n", "##########################################################################\n", "buffer_len_in_secs = chunk_len_in_secs + 2* context_len_in_secs\n" - ], - "metadata": { - "id": "gF86J9Knwpe_" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", + "metadata": { + "id": "C1s93TbcwZNt" + }, "source": [ "## Baseline: Middle Token Predictions\n", "\n", "Now compute the transcriptions over the data subset using the baseline algorithm - `Middle Token`. " - ], - "metadata": { - "id": "C1s93TbcwZNt" - } + ] }, { "cell_type": "code", - "source": [ - "asr_middle = streaming_utils.BatchedFrameASRRNNT(model, chunk_len_in_secs, buffer_len_in_secs,\n", - " batch_size=batch_size, max_steps_per_timestep=max_steps_per_timestep)" - ], + "execution_count": null, "metadata": { "id": "PsqjMkeEu4oK" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "asr_middle = streaming_utils.BatchedFrameASRRNNT(model, chunk_len_in_secs, buffer_len_in_secs,\n", + " batch_size=batch_size, max_steps_per_timestep=max_steps_per_timestep)" + ] }, { "cell_type": "code", - "source": [ - "middle_transcripts = transcribe_buffers(asr_middle, audio_filepaths, chunk_len_in_secs, buffer_len_in_secs, model_stride)" - ], + "execution_count": null, "metadata": { "id": "sNQNDjroxWb8" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "middle_transcripts = transcribe_buffers(asr_middle, audio_filepaths, chunk_len_in_secs, buffer_len_in_secs, model_stride)" + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "ENGTX70QzcrB" + }, + "outputs": [], "source": [ "from nemo.collections.asr.metrics.wer import word_error_rate\n", "\n", "wer_middle = word_error_rate(middle_transcripts, ground_texts, use_cer=False)\n", "print(\"Middle token algorithm WER :\", wer_middle)" - ], - "metadata": { - "id": "ENGTX70QzcrB" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", + "metadata": { + "id": "Z3hCweGDy12t" + }, "source": [ "## LCS Merge Predictions\n", "\n", "Next, let us compute the transcriptions over the data subset using the `LCS Merge` algorithm." 
- ], - "metadata": { - "id": "Z3hCweGDy12t" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "E7DBDeBPx4cJ" + }, + "outputs": [], "source": [ "asr_lcs = LongestCommonSubsequenceBatchedFrameASRRNNT(model, chunk_len_in_secs, buffer_len_in_secs,\n", " batch_size=batch_size, max_steps_per_timestep=max_steps_per_timestep,\n", " alignment_basepath=lcs_alignments_path)" - ], - "metadata": { - "id": "E7DBDeBPx4cJ" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", - "source": [ - "lcs_transcripts = transcribe_buffers(asr_lcs, audio_filepaths, chunk_len_in_secs, buffer_len_in_secs, model_stride)" - ], + "execution_count": null, "metadata": { "id": "BQo9TNSyzPfv" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "lcs_transcripts = transcribe_buffers(asr_lcs, audio_filepaths, chunk_len_in_secs, buffer_len_in_secs, model_stride)" + ] }, { "cell_type": "code", - "source": [ - "wer_lcs = word_error_rate(lcs_transcripts, ground_texts, use_cer=False)\n", - "print(\"LCS algorithm WER :\", wer_lcs)" - ], + "execution_count": null, "metadata": { "id": "IXW6I3hDzT6I" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "wer_lcs = word_error_rate(lcs_transcripts, ground_texts, use_cer=False)\n", + "print(\"LCS algorithm WER :\", wer_lcs)" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "MjGfb1x00egs" + }, "source": [ "# Compare the text predictions from the two merge algorithms\n", "\n", "Depending on the data subset chosen (or randomly sampled), the WER for this algorithm may be higher or lower than the baseline. Note that if you select all the samples in the dataset, then the WER of this method is slightly higher than the baseline.\n", "\n", "We will do a more in-depth analysis of the failure cases below." - ], - "metadata": { - "id": "MjGfb1x00egs" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "80NvUa1Y0dk7" + }, + "outputs": [], "source": [ "def compare_algorithms(ground_truth, middle_transcripts, lcs_transcripts, use_cer=False):\n", " worse = []\n", @@ -1068,37 +1055,38 @@ " print(\"Number of samples LCS merge was better than middle ground :\", len(better))\n", " print(\"Number of samples LCS merge was worse than middle ground :\", len(worse))\n", " return same, better, worse" - ], - "metadata": { - "id": "80NvUa1Y0dk7" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", - "source": [ - "both_same, lcs_better, lcs_worse = compare_algorithms(ground_texts, middle_transcripts, lcs_transcripts, use_cer=False)" - ], + "execution_count": null, "metadata": { "id": "A-NIFnjo0KB5" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "both_same, lcs_better, lcs_worse = compare_algorithms(ground_texts, middle_transcripts, lcs_transcripts, use_cer=False)" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "awZNviRC5C-O" + }, "source": [ "# EXTRA: Compare the alignment matrices of LCS\n", "\n", "Over the entire dataset, there would be some samples where the `LCS Merge` algorithm did better than the `Middle Token` algorithm and vice-versa. 
Below, we take a sample-level look at such cases. Since the `LCS Merge` algorithm is an alignment-based technique, we can visualize the alignment itself, determine the cases in which it failed, and locate the source of the error within the alignment.\n" - ], - "metadata": { - "id": "awZNviRC5C-O" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "cellView": "form", + "id": "SiW4xw424lB1" + }, + "outputs": [], "source": [ "#@title LCS Alignment helper functions\n", "\n", @@ -1197,29 +1185,28 @@ " print()\n", "\n", " " - ], - "metadata": { - "cellView": "form", - "id": "SiW4xw424lB1" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "markdown", + "metadata": { + "id": "_DEYtkP46Srw" + }, "source": [ "## Worse alignment\n", "\n", "Let us search for a sample where the `LCS Merge` did worse than the `Middle Token` algorithm. \n", "\n", "Such cases are important to analyze because it is visually apparent where the alignment went wrong, and we can determine whether an extension to this algorithm could further improve such cases.\n" - ], - "metadata": { - "id": "_DEYtkP46Srw" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "rt7c-qoH5a30" + }, + "outputs": [], "source": [ "worse_idx = find_first_sample_with_alignment(lcs_alignments_path, lcs_worse, start_idx=0)\n", "worse_sample = lcs_worse[worse_idx]\n", @@ -1227,39 +1214,39 @@ "print(\"A sample where LCS did worse than Middle Token merge algorithm :\")\n", "print(\"The texts are structured as (Ground Truth, Middle Token, LCS Merge)\")\n", "worse_sample" - ], - "metadata": { - "id": "rt7c-qoH5a30" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", - "source": [ - "display_alignment_merge(lcs_alignments_path, worse_sample, print_xy_token_ids=False)" - ], + "execution_count": null, "metadata": { "id": "_wBepfcH7kAK" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "display_alignment_merge(lcs_alignments_path, worse_sample, print_xy_token_ids=False)" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "Z-xHYGIEJXBx" + }, "source": [ "## Better alignment\n", "\n", "Next, let us search for a sample where the `LCS Merge` did better than the `Middle Token` algorithm. \n", "\n", "Such cases are also essential to analyze because it is visually apparent where the alignment was better. We can determine if we can improve the `Middle Token` algorithm."
- ], - "metadata": { - "id": "Z-xHYGIEJXBx" - } + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "AHBx3QpQE5OX" + }, + "outputs": [], "source": [ "better_idx = find_first_sample_with_alignment(lcs_alignments_path, lcs_better, start_idx=0)\n", "better_sample = lcs_better[better_idx]\n", @@ -1267,36 +1254,49 @@ "print(\"A sample where LCS did better than Middle Token merge algoritm :\")\n", "print(\"The texts are structured as (Ground Truth, Middle Token, LCS Merge)\")\n", "better_sample" - ], - "metadata": { - "id": "AHBx3QpQE5OX" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", - "source": [ - "display_alignment_merge(lcs_alignments_path, better_sample)" - ], + "execution_count": null, "metadata": { "id": "urjYWVGfJhlU" }, - "execution_count": null, - "outputs": [] + "outputs": [], + "source": [ + "display_alignment_merge(lcs_alignments_path, better_sample)" + ] }, { "cell_type": "markdown", + "metadata": { + "id": "GRFifXuROpzg" + }, "source": [ "# Final notes\n", "\n", "Following the [Buffered Transducer Inference](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/asr/Buffered_Transducer_Inference.ipynb) tutorial and designing a token merge algorithm that can be a simple extension to the baseline `Middle Token` algorithm, we see that there are cases where both algorithms have their uses. \n", "\n", "To expand our research effort on developing more sophisticated streaming / buffered transducer inference methods, we encourage the users to try these algorithms in script format for efficient inference on large datasets - available at [ASR Chunked Streaming Inference](https://github.com/NVIDIA/NeMo/blob/stable/examples/asr/asr_chunked_inference/rnnt/speech_to_text_buffered_infer_rnnt.py).\n" - ], - "metadata": { - "id": "GRFifXuROpzg" - } + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "name": "Buffered_Transducer_Inference_with_LCS_Merge.ipynb", + "provenance": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + }, + "language_info": { + "name": "python" } - ] -} \ No newline at end of file + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/tutorials/asr/Intro_to_Transducers.ipynb b/tutorials/asr/Intro_to_Transducers.ipynb index d03508f52168..a82a4804ca56 100644 --- a/tutorials/asr/Intro_to_Transducers.ipynb +++ b/tutorials/asr/Intro_to_Transducers.ipynb @@ -43,7 +43,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]" ], "execution_count": null, diff --git a/tutorials/asr/Multilang_ASR.ipynb b/tutorials/asr/Multilang_ASR.ipynb index 06dd5f3d1ef2..8320cc8a07c9 100644 --- a/tutorials/asr/Multilang_ASR.ipynb +++ b/tutorials/asr/Multilang_ASR.ipynb @@ -101,7 +101,7 @@ "\n", "## Install NeMo\n", "## We are using the main branch but you might want to adjust that too\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "\"\"\"\n", diff --git a/tutorials/asr/Offline_ASR.ipynb b/tutorials/asr/Offline_ASR.ipynb index 6fc3862fb3a1..2dd4cbe9d814 100644 --- a/tutorials/asr/Offline_ASR.ipynb +++ b/tutorials/asr/Offline_ASR.ipynb @@ -51,7 +51,7 @@ "id": "I9eIxAyKHREB" }, "source": [ - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "try:\n", " # Import NeMo Speech Recognition 
collection\n", " import nemo.collections.asr as nemo_asr\n", diff --git a/tutorials/asr/Offline_ASR_with_VAD_for_CTC_models.ipynb b/tutorials/asr/Offline_ASR_with_VAD_for_CTC_models.ipynb index d05503c0f1f3..29913fe0fe73 100644 --- a/tutorials/asr/Offline_ASR_with_VAD_for_CTC_models.ipynb +++ b/tutorials/asr/Offline_ASR_with_VAD_for_CTC_models.ipynb @@ -22,7 +22,7 @@ "!pip install wget\n", "\n", "## Install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "\"\"\"\n", diff --git a/tutorials/asr/Online_ASR_Microphone_Demo.ipynb b/tutorials/asr/Online_ASR_Microphone_Demo.ipynb index 751a33cdd705..5d2f1451d1bf 100644 --- a/tutorials/asr/Online_ASR_Microphone_Demo.ipynb +++ b/tutorials/asr/Online_ASR_Microphone_Demo.ipynb @@ -26,7 +26,7 @@ "!pip install pyaudio\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n", "\n", "## Install TorchAudio\n", diff --git a/tutorials/asr/Online_Noise_Augmentation.ipynb b/tutorials/asr/Online_Noise_Augmentation.ipynb index 9781d965b0c1..5756c7d58ebe 100644 --- a/tutorials/asr/Online_Noise_Augmentation.ipynb +++ b/tutorials/asr/Online_Noise_Augmentation.ipynb @@ -31,7 +31,7 @@ "!pip install text-unidecode\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n", "\n", "## Install TorchAudio\n", diff --git a/tutorials/asr/Online_Offline_Microphone_VAD_Demo.ipynb b/tutorials/asr/Online_Offline_Microphone_VAD_Demo.ipynb index 43b7c74e1db8..2076bc06982b 100644 --- a/tutorials/asr/Online_Offline_Microphone_VAD_Demo.ipynb +++ b/tutorials/asr/Online_Offline_Microphone_VAD_Demo.ipynb @@ -26,7 +26,7 @@ "!pip install pyaudio\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n", "\n", "## Install TorchAudio\n", diff --git a/tutorials/asr/Online_Offline_Speech_Commands_Demo.ipynb b/tutorials/asr/Online_Offline_Speech_Commands_Demo.ipynb index 3e1f05369e48..2488e46287a6 100644 --- a/tutorials/asr/Online_Offline_Speech_Commands_Demo.ipynb +++ b/tutorials/asr/Online_Offline_Speech_Commands_Demo.ipynb @@ -28,7 +28,7 @@ "!pip install pyaudio\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n", "\n", "## Install TorchAudio\n", diff --git a/tutorials/asr/Self_Supervised_Pre_Training.ipynb b/tutorials/asr/Self_Supervised_Pre_Training.ipynb index c0b0f8aff869..0f0270c1ad75 100644 --- a/tutorials/asr/Self_Supervised_Pre_Training.ipynb +++ b/tutorials/asr/Self_Supervised_Pre_Training.ipynb @@ -27,7 +27,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "\"\"\"\n", diff --git a/tutorials/asr/Speech_Commands.ipynb b/tutorials/asr/Speech_Commands.ipynb index 40ce00ae23c2..14cf1dc3812f 100644 --- a/tutorials/asr/Speech_Commands.ipynb +++ b/tutorials/asr/Speech_Commands.ipynb @@ -60,7 +60,7 @@ "!pip install text-unidecode\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install 
git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n", "\n", "## Install TorchAudio\n", diff --git a/tutorials/asr/Streaming_ASR.ipynb b/tutorials/asr/Streaming_ASR.ipynb index f4aa8d160057..5d4d5b188e18 100644 --- a/tutorials/asr/Streaming_ASR.ipynb +++ b/tutorials/asr/Streaming_ASR.ipynb @@ -27,7 +27,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "## Grab the config we'll use in this example\n", diff --git a/tutorials/asr/Voice_Activity_Detection.ipynb b/tutorials/asr/Voice_Activity_Detection.ipynb index 8ef5322b13a2..f0d2ef14ce6f 100644 --- a/tutorials/asr/Voice_Activity_Detection.ipynb +++ b/tutorials/asr/Voice_Activity_Detection.ipynb @@ -27,7 +27,7 @@ "!pip install text-unidecode\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n", "\n", "## Install TorchAudio\n", diff --git a/tutorials/asr/asr_adapters/ASR_with_Adapters.ipynb b/tutorials/asr/asr_adapters/ASR_with_Adapters.ipynb index 2fcd2f399940..468c602a8765 100644 --- a/tutorials/asr/asr_adapters/ASR_with_Adapters.ipynb +++ b/tutorials/asr/asr_adapters/ASR_with_Adapters.ipynb @@ -51,7 +51,7 @@ "!pip install matplotlib>=3.3.2\n", "\n", "## Install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "## Grab the config we'll use in this example\n", diff --git a/tutorials/nlp/01_Pretrained_Language_Models_for_Downstream_Tasks.ipynb b/tutorials/nlp/01_Pretrained_Language_Models_for_Downstream_Tasks.ipynb index 120207831ce6..faa93de12514 100644 --- a/tutorials/nlp/01_Pretrained_Language_Models_for_Downstream_Tasks.ipynb +++ b/tutorials/nlp/01_Pretrained_Language_Models_for_Downstream_Tasks.ipynb @@ -26,7 +26,7 @@ "# If you're using Google Colab and not running locally, run this cell\n", "\n", "# install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]" ] }, diff --git a/tutorials/nlp/02_NLP_Tokenizers.ipynb b/tutorials/nlp/02_NLP_Tokenizers.ipynb index f6b56e0712a3..c63d2a8b1689 100644 --- a/tutorials/nlp/02_NLP_Tokenizers.ipynb +++ b/tutorials/nlp/02_NLP_Tokenizers.ipynb @@ -10,7 +10,7 @@ }, "outputs": [], "source": [ - "BRANCH = 'r1.13.0'" + "BRANCH = 'main'" ] }, { @@ -35,7 +35,7 @@ "# If you're using Google Colab and not running locally, run this cell\n", "\n", "# install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]" ] }, diff --git a/tutorials/nlp/Data_Preprocessing_and_Cleaning_for_NMT.ipynb b/tutorials/nlp/Data_Preprocessing_and_Cleaning_for_NMT.ipynb index e535f7594f97..323bfa1c49b8 100644 --- a/tutorials/nlp/Data_Preprocessing_and_Cleaning_for_NMT.ipynb +++ b/tutorials/nlp/Data_Preprocessing_and_Cleaning_for_NMT.ipynb @@ -300,7 +300,7 @@ "\n", "## Install NeMo\n", "\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]\n", "\n", "!pip uninstall -y sacrebleu\n", diff --git a/tutorials/nlp/Dialogue.ipynb b/tutorials/nlp/Dialogue.ipynb index 8395fb4c8eb1..ddd3bdd4f929 100644 --- a/tutorials/nlp/Dialogue.ipynb +++ 
b/tutorials/nlp/Dialogue.ipynb @@ -27,7 +27,7 @@ "outputs": [], "source": [ "import os \n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!apt-get update && apt-get install -y libsndfile1 ffmpeg\n", "!git clone https://github.com/NVIDIA/NeMo --branch $BRANCH\n", "os.chdir('NeMo')\n", diff --git a/tutorials/nlp/Entity_Linking_Medical.ipynb b/tutorials/nlp/Entity_Linking_Medical.ipynb index dd41a25e5601..0d7a1d5c8de5 100644 --- a/tutorials/nlp/Entity_Linking_Medical.ipynb +++ b/tutorials/nlp/Entity_Linking_Medical.ipynb @@ -17,7 +17,7 @@ "\"\"\"\n", "\n", "## Install NeMo if using google collab or if its not installed locally\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]" ] }, diff --git a/tutorials/nlp/GLUE_Benchmark.ipynb b/tutorials/nlp/GLUE_Benchmark.ipynb index 203a278bea88..d8fe75940b09 100644 --- a/tutorials/nlp/GLUE_Benchmark.ipynb +++ b/tutorials/nlp/GLUE_Benchmark.ipynb @@ -44,7 +44,7 @@ "# If you're using Google Colab and not running locally, run this cell\n", "\n", "# install NeMo\n", - "BRANCH = 'r1.13.0'\n!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]\n" + "BRANCH = 'main'\n!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]\n" ], "execution_count": null, "outputs": [] diff --git a/tutorials/nlp/Joint_Intent_and_Slot_Classification.ipynb b/tutorials/nlp/Joint_Intent_and_Slot_Classification.ipynb index c548bdb02161..104d69df18e2 100644 --- a/tutorials/nlp/Joint_Intent_and_Slot_Classification.ipynb +++ b/tutorials/nlp/Joint_Intent_and_Slot_Classification.ipynb @@ -22,7 +22,7 @@ "# If you're using Google Colab and not running locally, run this cell\n", "\n", "# install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]" ] }, diff --git a/tutorials/nlp/Megatron_Synthetic_Tabular_Data_Generation.ipynb b/tutorials/nlp/Megatron_Synthetic_Tabular_Data_Generation.ipynb index 985fdb568042..3dc3d6ce192e 100644 --- a/tutorials/nlp/Megatron_Synthetic_Tabular_Data_Generation.ipynb +++ b/tutorials/nlp/Megatron_Synthetic_Tabular_Data_Generation.ipynb @@ -62,7 +62,7 @@ "metadata": {}, "outputs": [], "source": [ - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "DATA_PATH='.'\n", "TRANSACTIONS=DATA_PATH+'/card_transaction.v1.csv'\n", "#CHECKPOINTS='/chk_points'\n", diff --git a/tutorials/nlp/Punctuation_and_Capitalization.ipynb b/tutorials/nlp/Punctuation_and_Capitalization.ipynb index aa80ebb5bd91..1519c234372b 100644 --- a/tutorials/nlp/Punctuation_and_Capitalization.ipynb +++ b/tutorials/nlp/Punctuation_and_Capitalization.ipynb @@ -6,7 +6,7 @@ "metadata": {}, "outputs": [], "source": [ - "BRANCH = 'r1.13.0'" + "BRANCH = 'main'" ] }, { diff --git a/tutorials/nlp/Punctuation_and_Capitalization_Lexical_Audio.ipynb b/tutorials/nlp/Punctuation_and_Capitalization_Lexical_Audio.ipynb index 57d443ddf5df..5580bc4cf946 100644 --- a/tutorials/nlp/Punctuation_and_Capitalization_Lexical_Audio.ipynb +++ b/tutorials/nlp/Punctuation_and_Capitalization_Lexical_Audio.ipynb @@ -10,7 +10,7 @@ }, "outputs": [], "source": [ - "BRANCH = 'r1.13.0'" + "BRANCH = 'main'" ] }, { diff --git a/tutorials/nlp/Question_Answering.ipynb b/tutorials/nlp/Question_Answering.ipynb index f461a5f651ef..5ce89b3baafc 100644 --- a/tutorials/nlp/Question_Answering.ipynb +++ b/tutorials/nlp/Question_Answering.ipynb @@ -74,7 +74,7 @@ }, "outputs": [], 
"source": [ - "BRANCH = 'r1.13.0'" + "BRANCH = 'main'" ] }, { diff --git a/tutorials/nlp/Relation_Extraction-BioMegatron.ipynb b/tutorials/nlp/Relation_Extraction-BioMegatron.ipynb index 54ff9d7ccabb..b7c25cb416ef 100644 --- a/tutorials/nlp/Relation_Extraction-BioMegatron.ipynb +++ b/tutorials/nlp/Relation_Extraction-BioMegatron.ipynb @@ -6,7 +6,7 @@ "metadata": {}, "outputs": [], "source": [ - "BRANCH = 'r1.13.0'" + "BRANCH = 'main'" ] }, { diff --git a/tutorials/nlp/Text2Sparql.ipynb b/tutorials/nlp/Text2Sparql.ipynb index 5b238ca27e60..0370831bf732 100644 --- a/tutorials/nlp/Text2Sparql.ipynb +++ b/tutorials/nlp/Text2Sparql.ipynb @@ -20,7 +20,7 @@ "# If you're using Google Colab and not running locally, run this cell\n", "\n", "# install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]" ] }, diff --git a/tutorials/nlp/Text_Classification_Sentiment_Analysis.ipynb b/tutorials/nlp/Text_Classification_Sentiment_Analysis.ipynb index b38f23002b6e..5b5b74e7bf11 100644 --- a/tutorials/nlp/Text_Classification_Sentiment_Analysis.ipynb +++ b/tutorials/nlp/Text_Classification_Sentiment_Analysis.ipynb @@ -20,7 +20,7 @@ "# If you're using Google Colab and not running locally, run this cell\n", "\n", "# install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]\n", "\n" ] diff --git a/tutorials/nlp/Token_Classification_Named_Entity_Recognition.ipynb b/tutorials/nlp/Token_Classification_Named_Entity_Recognition.ipynb index bc41c8568844..0e8fadde8041 100644 --- a/tutorials/nlp/Token_Classification_Named_Entity_Recognition.ipynb +++ b/tutorials/nlp/Token_Classification_Named_Entity_Recognition.ipynb @@ -30,7 +30,7 @@ "metadata": {}, "outputs": [], "source": [ - "BRANCH = 'r1.13.0'" + "BRANCH = 'main'" ] }, { @@ -53,7 +53,7 @@ "# If you're using Google Colab and not running locally, run this cell\n", "\n", "# install NeMo\n", - "BRANCH = 'r1.13.0'\n!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]\n" + "BRANCH = 'main'\n!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]\n" ], "execution_count": null, "outputs": [] diff --git a/tutorials/nlp/Zero_Shot_Intent_Recognition.ipynb b/tutorials/nlp/Zero_Shot_Intent_Recognition.ipynb index 05706014b9ba..dd9278666a28 100644 --- a/tutorials/nlp/Zero_Shot_Intent_Recognition.ipynb +++ b/tutorials/nlp/Zero_Shot_Intent_Recognition.ipynb @@ -22,7 +22,7 @@ "# If you're using Google Colab and not running locally, run this cell\n", "\n", "# install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]" ] }, @@ -671,4 +671,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/tutorials/speaker_tasks/ASR_with_SpeakerDiarization.ipynb b/tutorials/speaker_tasks/ASR_with_SpeakerDiarization.ipynb index edb982a6fa0e..ea943b35e0d0 100644 --- a/tutorials/speaker_tasks/ASR_with_SpeakerDiarization.ipynb +++ b/tutorials/speaker_tasks/ASR_with_SpeakerDiarization.ipynb @@ -30,7 +30,7 @@ "!pip install text-unidecode\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n", "\n", "## Install TorchAudio\n", diff --git 
a/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb b/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb index 9c790824afcb..64ceb49d7d64 100644 --- a/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb +++ b/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb @@ -23,7 +23,7 @@ "!pip install text-unidecode\n", "\n", "# ## Install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n", "\n", "## Install TorchAudio\n", diff --git a/tutorials/speaker_tasks/Speaker_Diarization_Training.ipynb b/tutorials/speaker_tasks/Speaker_Diarization_Training.ipynb index c401591ea319..91df72848614 100644 --- a/tutorials/speaker_tasks/Speaker_Diarization_Training.ipynb +++ b/tutorials/speaker_tasks/Speaker_Diarization_Training.ipynb @@ -18,7 +18,7 @@ "\"\"\"\n", "\n", "NEMO_DIR_PATH = \"NeMo\"\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "\n", "! git clone https://github.com/NVIDIA/NeMo\n", "%cd NeMo\n", diff --git a/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb b/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb index 0b19f83bbcd8..8e3ae9c1f131 100644 --- a/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb +++ b/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb @@ -27,7 +27,7 @@ "!pip install text-unidecode\n", "\n", "## Install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]\n", "\n", "# Install TorchAudio\n", diff --git a/tutorials/text_processing/ITN_with_Thutmose_Tagger.ipynb b/tutorials/text_processing/ITN_with_Thutmose_Tagger.ipynb index c7af02c2406f..6204bf2516bb 100644 --- a/tutorials/text_processing/ITN_with_Thutmose_Tagger.ipynb +++ b/tutorials/text_processing/ITN_with_Thutmose_Tagger.ipynb @@ -21,7 +21,7 @@ "import os\n", "\n", "# install NeMo\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "\n", "GITHUB_ACCOUNT = 'NVIDIA' # change this if using a fork\n", "\n", diff --git a/tutorials/text_processing/Text_(Inverse)_Normalization.ipynb b/tutorials/text_processing/Text_(Inverse)_Normalization.ipynb index e00dfc9463de..596523b41c0a 100755 --- a/tutorials/text_processing/Text_(Inverse)_Normalization.ipynb +++ b/tutorials/text_processing/Text_(Inverse)_Normalization.ipynb @@ -60,7 +60,7 @@ "outputs": [], "source": [ "## Install NeMo, which installs both nemo and nemo_text_processing package\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]\n", "\n", "# install Pynini for text normalization\n", diff --git a/tutorials/text_processing/WFST_Tutorial.ipynb b/tutorials/text_processing/WFST_Tutorial.ipynb index 51daded0b796..ed7127241dd5 100644 --- a/tutorials/text_processing/WFST_Tutorial.ipynb +++ b/tutorials/text_processing/WFST_Tutorial.ipynb @@ -39,7 +39,7 @@ "outputs": [], "source": [ "## Install NeMo, which installs both nemo and nemo_text_processing package\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nemo_text_processing]\n", "\n", "# install Pynini for text normalization\n", diff --git a/tutorials/tools/CTC_Segmentation_Tutorial.ipynb b/tutorials/tools/CTC_Segmentation_Tutorial.ipynb index 25f63da12df9..d22258885db8 100644 --- a/tutorials/tools/CTC_Segmentation_Tutorial.ipynb +++ 
b/tutorials/tools/CTC_Segmentation_Tutorial.ipynb @@ -35,7 +35,7 @@ "id": "d4KCUoxSpdoZ" }, "source": [ - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "\n", "\"\"\"\n", "You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.\n", diff --git a/tutorials/tools/Multispeaker_Simulator.ipynb b/tutorials/tools/Multispeaker_Simulator.ipynb index e9822fd0ea9e..8b0db6e75b49 100644 --- a/tutorials/tools/Multispeaker_Simulator.ipynb +++ b/tutorials/tools/Multispeaker_Simulator.ipynb @@ -18,7 +18,7 @@ "\"\"\"\n", "\n", "NEMO_DIR_PATH = \"NeMo\"\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "\n", "! git clone https://github.com/NVIDIA/NeMo\n", "%cd NeMo\n", diff --git a/tutorials/tts/Aligner_Inference_Examples.ipynb b/tutorials/tts/Aligner_Inference_Examples.ipynb index d32305579166..f6acbfa2c0d4 100644 --- a/tutorials/tts/Aligner_Inference_Examples.ipynb +++ b/tutorials/tts/Aligner_Inference_Examples.ipynb @@ -39,7 +39,7 @@ "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", "4. Run this cell to set up dependencies.\n", "\"\"\"\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "# # If you're using Colab and not running locally, uncomment and run this cell.\n", "# !apt-get install sox libsndfile1 ffmpeg\n", "# !pip install wget text-unidecode\n", diff --git a/tutorials/tts/FastPitch_Finetuning.ipynb b/tutorials/tts/FastPitch_Finetuning.ipynb index 034e9e050aaf..fe607944c1e8 100755 --- a/tutorials/tts/FastPitch_Finetuning.ipynb +++ b/tutorials/tts/FastPitch_Finetuning.ipynb @@ -57,7 +57,7 @@ "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", "4. Run this cell to set up dependencies.\n", "\"\"\"\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "# # If you're using Google Colab and not running locally, uncomment and run this cell.\n", "# !apt-get install sox libsndfile1 ffmpeg\n", "# !pip install wget text-unidecode pynini==2.1.4\n", diff --git a/tutorials/tts/FastPitch_GermanTTS_Training.ipynb b/tutorials/tts/FastPitch_GermanTTS_Training.ipynb index a7de4224ae97..0b2e4f3fe132 100644 --- a/tutorials/tts/FastPitch_GermanTTS_Training.ipynb +++ b/tutorials/tts/FastPitch_GermanTTS_Training.ipynb @@ -51,7 +51,7 @@ "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", "4. Run this cell to set up dependencies# .\n", "\"\"\"\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "# # If you're using Colab and not running locally, uncomment and run this cell.\n", "# !apt-get install sox libsndfile1 ffmpeg\n", "# !pip install wget text-unidecode pynini==2.1.4 scipy==1.7.3\n", diff --git a/tutorials/tts/FastPitch_MixerTTS_Training.ipynb b/tutorials/tts/FastPitch_MixerTTS_Training.ipynb index 7547d478b5b2..1b2ebc66ea3b 100644 --- a/tutorials/tts/FastPitch_MixerTTS_Training.ipynb +++ b/tutorials/tts/FastPitch_MixerTTS_Training.ipynb @@ -50,7 +50,7 @@ "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", "4. 
Run this cell to set up dependencies# .\n", "\"\"\"\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "# # If you're using Colab and not running locally, uncomment and run this cell.\n", "# !apt-get install sox libsndfile1 ffmpeg\n", "# !pip install wget text-unidecode pynini==2.1.4 scipy==1.7.3\n", diff --git a/tutorials/tts/FastPitch_Speaker_Interpolation.ipynb b/tutorials/tts/FastPitch_Speaker_Interpolation.ipynb index 7f55c3d2e12f..eda5bba0aa1e 100644 --- a/tutorials/tts/FastPitch_Speaker_Interpolation.ipynb +++ b/tutorials/tts/FastPitch_Speaker_Interpolation.ipynb @@ -94,7 +94,7 @@ "source": [ "# Install NeMo library. If you are running locally (rather than on Google Colab), comment out the below lines\n", "# and instead follow the instructions at https://github.com/NVIDIA/NeMo#Installation\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]" ] }, diff --git a/tutorials/tts/Inference_DurationPitchControl.ipynb b/tutorials/tts/Inference_DurationPitchControl.ipynb index 59a01c628449..c4879f38274c 100644 --- a/tutorials/tts/Inference_DurationPitchControl.ipynb +++ b/tutorials/tts/Inference_DurationPitchControl.ipynb @@ -46,7 +46,7 @@ "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", "4. Run this cell to set up dependencies.\n", "\"\"\"\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "# # If you're using Google Colab and not running locally, uncomment and run this cell.\n", "# !apt-get install sox libsndfile1 ffmpeg\n", "# !pip install wget text-unidecode\n", diff --git a/tutorials/tts/Inference_ModelSelect.ipynb b/tutorials/tts/Inference_ModelSelect.ipynb index 71067530b311..8fe398edafa6 100644 --- a/tutorials/tts/Inference_ModelSelect.ipynb +++ b/tutorials/tts/Inference_ModelSelect.ipynb @@ -46,7 +46,7 @@ "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", "4. Run this cell to set up dependencies.\n", "\"\"\"\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "# # If you're using Google Colab and not running locally, uncomment and run this cell.\n", "# !apt-get install sox libsndfile1 ffmpeg\n", "# !pip install wget text-unidecode\n", diff --git a/tutorials/tts/NeMo_TTS_Primer.ipynb b/tutorials/tts/NeMo_TTS_Primer.ipynb index 938eac687d07..21c366155b17 100644 --- a/tutorials/tts/NeMo_TTS_Primer.ipynb +++ b/tutorials/tts/NeMo_TTS_Primer.ipynb @@ -25,7 +25,7 @@ "source": [ "# Install NeMo library. If you are running locally (rather than on Google Colab), comment out the below lines\n", "# and instead follow the instructions at https://github.com/NVIDIA/NeMo#Installation\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[all]" ] }, diff --git a/tutorials/tts/Tacotron2_Training.ipynb b/tutorials/tts/Tacotron2_Training.ipynb index 995a204249a9..3642a3e9e4dc 100644 --- a/tutorials/tts/Tacotron2_Training.ipynb +++ b/tutorials/tts/Tacotron2_Training.ipynb @@ -54,7 +54,7 @@ "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", "4. 
Run this cell to set up dependencies# .\n", "\"\"\"\n", - "BRANCH = 'r1.13.0'\n", + "BRANCH = 'main'\n", "# # If you're using Colab and not running locally, uncomment and run this cell.\n", "# !apt-get install sox libsndfile1 ffmpeg\n", "# !pip install wget text-unidecode\n", From 342b5d2f34ae51964cd6a56ed4f2117c86ec27b7 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Tue, 13 Dec 2022 03:51:36 -0800 Subject: [PATCH 220/244] update Jenkinsfile versions Signed-off-by: Evgeniy Shabalin --- Jenkinsfile | 288 ++++++++++++++++++++++++++-------------------------- 1 file changed, 144 insertions(+), 144 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index fbd23e95ba74..95c7d6f58e28 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -93,8 +93,8 @@ pipeline { stage('L0: Unit Tests CPU') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } steps { @@ -106,8 +106,8 @@ pipeline { stage('L0: TN/ITN Tests CPU') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -139,8 +139,8 @@ pipeline { stage('L2: NeMo text processing') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -191,8 +191,8 @@ pipeline { stage('L2: ASR dev run') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -277,8 +277,8 @@ pipeline { stage('L2: ASR dev run - part two') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -328,8 +328,8 @@ pipeline { stage('L2: Speaker dev run') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -450,8 +450,8 @@ pipeline { // stage('L2: ASR DALI dev run') { // when { // anyOf { - // branch 'r1.13.0' - // changeRequest target: 'r1.13.0' + // branch'main' + // changeRequest target:'main' // } // } // failFast true @@ -518,8 +518,8 @@ pipeline { // stage('L2: ASR RNNT dev run') { // when { // anyOf { - // branch 'r1.13.0' - // changeRequest target: 'r1.13.0' + // branch'main' + // changeRequest target:'main' // } // } // failFast true @@ -610,8 +610,8 @@ pipeline { stage('L2: ASR Multi-dataloader dev run') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -658,8 +658,8 @@ pipeline { stage('L2: ASR Adapters') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -703,8 +703,8 @@ pipeline { stage('L2: Megatron T5 Adapter PP=2') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -747,8 +747,8 @@ pipeline { stage('L2: Megatron T5 Adapter TP=2') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -789,8 +789,8 @@ pipeline { stage('L2: Megatron T5 IA3 PP=2') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -833,8 +833,8 @@ pipeline { stage('L2: Megatron T5 IA3 TP=2') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -875,8 +875,8 
@@ pipeline { stage('L2: Megatron GPT Adapter TP=2') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -916,8 +916,8 @@ pipeline { stage('L2: Megatron GPT Adapter PP=2') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -958,8 +958,8 @@ pipeline { stage('L2: Speech Transcription') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -980,8 +980,8 @@ pipeline { stage('L2: Segmentation Tool') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } stages { @@ -1036,8 +1036,8 @@ pipeline { stage('L2: G2P Models') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -1117,8 +1117,8 @@ pipeline { // stage('L2: Multi-GPU Megatron finetuning') { // when { // anyOf { - // branch 'r1.13.0' - // changeRequest target: 'r1.13.0' + // branch'main' + // changeRequest target:'main' // } // } // failFast true @@ -1144,8 +1144,8 @@ pipeline { stage('L2: STS-b') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -1204,8 +1204,8 @@ pipeline { stage('L2: Dialogue Classification') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -1375,8 +1375,8 @@ pipeline { stage('L2: Dialogue Generation') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -1441,8 +1441,8 @@ pipeline { // stage('L2: Dialogue Generation Part 2') { // when { // anyOf { -// branch 'r1.13.0' -// changeRequest target: 'r1.13.0' +// branch'main' +// changeRequest target:'main' // } // } // failFast true @@ -1471,8 +1471,8 @@ pipeline { stage('L2: COPY') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -1501,8 +1501,8 @@ pipeline { stage('L2: Duplex Text Normalization') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -1539,8 +1539,8 @@ pipeline { // stage('L2: MegaBERT Token Classification') { // when { // anyOf { - // branch 'r1.13.0' - // changeRequest target: 'r1.13.0' + // branch'main' + // changeRequest target:'main' // } // } // failFast true @@ -1565,8 +1565,8 @@ pipeline { stage('L2: BERT Text Classification') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -1594,8 +1594,8 @@ pipeline { stage('L2: Parallel BERT Question-Answering SQUAD v1.1 & v2.0') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -1653,8 +1653,8 @@ pipeline { stage('L2: Parallel BART Question-Answering SQUAD v1.1 & v2.0') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -1714,8 +1714,8 @@ pipeline { stage('L2: Parallel GPT2 Question-Answering SQUAD v1.1 & v2.0') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ 
-1775,8 +1775,8 @@ pipeline { stage('L2: Intent and Slot Classification Tasks') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -1815,8 +1815,8 @@ pipeline { // stage('L2: Model Parallel Size 2 Megatron Text Classification') { // when { // anyOf{ - // branch 'r1.13.0' - // changeRequest target: 'r1.13.0' + // branch'main' + // changeRequest target:'main' // } // } // failFast true @@ -1844,8 +1844,8 @@ pipeline { // stage('L2: Model Parallel Size 2 Megatron Autoresume') { // when { // anyOf{ - // branch 'r1.13.0' - // changeRequest target: 'r1.13.0' + // branch'main' + // changeRequest target:'main' // } // } // failFast true @@ -1875,8 +1875,8 @@ pipeline { // stage('L2: Model Parallel Size 2 Megatron Evaluation from .nemo') { // when { // anyOf{ - // branch 'r1.13.0' - // changeRequest target: 'r1.13.0' + // branch'main' + // changeRequest target:'main' // } // } // failFast true @@ -1896,8 +1896,8 @@ pipeline { // stage('L2: Model Parallel Size 2 Megatron Train from .nemo') { // when { // anyOf{ - // branch 'r1.13.0' - // changeRequest target: 'r1.13.0' + // branch'main' + // changeRequest target:'main' // } // } // failFast true @@ -1919,8 +1919,8 @@ pipeline { stage('L2: Parallel NLP Examples 2') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -2044,8 +2044,8 @@ pipeline { stage('Punctuation & Capitalization tarred dataset') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -2103,8 +2103,8 @@ pipeline { stage('Punctuation & Capitalization, Different ways of passing labels to model') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -2211,8 +2211,8 @@ pipeline { stage('Punctuation & Capitalization inference') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -2237,8 +2237,8 @@ pipeline { stage('L2: Parallel Pretraining BERT pretraining from Text/Preprocessed') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -2299,8 +2299,8 @@ pipeline { stage('L2: Entity Linking') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -2327,8 +2327,8 @@ pipeline { stage('L2: NMT Attention is All You Need Training') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -2450,8 +2450,8 @@ pipeline { stage('L2: NMT Attention is All You Need Inference') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -2486,8 +2486,8 @@ pipeline { stage('L2: NMT Attention is All You Need Finetuning') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -2520,8 +2520,8 @@ pipeline { stage('L2: NMT with HuggingFace') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -2597,8 +2597,8 @@ pipeline { stage('L2: NMT Tarred Dataset Creation') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 
'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -2651,8 +2651,8 @@ pipeline { stage('L2: Megatron NMT Training TP=2') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -2746,8 +2746,8 @@ pipeline { // stage('L2: NMT Bottleneck Fallback') { // when { // anyOf { - // branch 'r1.13.0' - // changeRequest target: 'r1.13.0' + // branch'main' + // changeRequest target:'main' // } // } // failFast true @@ -2793,8 +2793,8 @@ pipeline { // stage('L2: NMT Bottleneck Architecture') { // when { // anyOf { - // branch 'r1.13.0' - // changeRequest target: 'r1.13.0' + // branch'main' + // changeRequest target:'main' // } // } // failFast true @@ -2876,8 +2876,8 @@ pipeline { // stage('L2: NMT Bottleneck LVM') { // when { // anyOf { - // branch 'r1.13.0' - // changeRequest target: 'r1.13.0' + // branch'main' + // changeRequest target:'main' // } // } // failFast true @@ -3029,8 +3029,8 @@ pipeline { stage('L2: Megatron Bert Pretraining and Resume Training') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -3100,8 +3100,8 @@ pipeline { stage('L2: Megatron RETRO Pretraining and Resume Training') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -3172,8 +3172,8 @@ pipeline { stage('L2: Megatron RETRO muTransfer Pretraining Performance') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -3255,8 +3255,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: BioMegatron Bert NER Task') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -3273,8 +3273,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron GPT Pretraining and Resume Training TP=2') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -3345,8 +3345,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron GPT Pretraining and Resume Training PP=2') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -3417,8 +3417,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron GPT Eval') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -3434,8 +3434,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron GPT Eval PP2') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -3483,8 +3483,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron GPT Prompt Learning') { when { anyOf { - branch 'r1.13.0' - changeRequest target: 'r1.13.0' + branch'main' + changeRequest target:'main' } } failFast true @@ -3556,8 +3556,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' // stage('L2: Megatron GPT Convert from Megatron-LM checkpoing and Eval') { // when { // anyOf { - // branch 'r1.13.0' - // changeRequest target: 'r1.13.0' + // branch'main' + 
//       changeRequest target:'main'
 //      }
 //    }
 //    failFast true
@@ -3583,8 +3583,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"'''
     stage('L2: Megatron Change Partitions') {
       when {
         anyOf {
-          branch 'r1.13.0'
-          changeRequest target: 'r1.13.0'
+          branch'main'
+          changeRequest target:'main'
         }
       }
       failFast true
@@ -3622,8 +3622,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"'''
     stage('L2: Megatron T5 Pretraining and Resume Training TP=2') {
       when {
         anyOf {
-          branch 'r1.13.0'
-          changeRequest target: 'r1.13.0'
+          branch'main'
+          changeRequest target:'main'
         }
       }
       failFast true
@@ -3718,8 +3718,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"'''
     stage('L2: Megatron T5 Pretraining and Resume Training PP=2') {
       when {
         anyOf {
-          branch 'r1.13.0'
-          changeRequest target: 'r1.13.0'
+          branch'main'
+          changeRequest target:'main'
         }
       }
       failFast true
@@ -3832,8 +3832,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"'''
     stage('L2: Megatron T5 Prompt Learning') {
       when {
         anyOf {
-          branch 'r1.13.0'
-          changeRequest target: 'r1.13.0'
+          branch'main'
+          changeRequest target:'main'
         }
       }
       failFast true
@@ -3931,8 +3931,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"'''
     stage('L2: Megatron UL2 Pretraining and Resume Training TP=2') {
       when {
         anyOf {
-          branch 'r1.13.0'
-          changeRequest target: 'r1.13.0'
+          branch'main'
+          changeRequest target:'main'
         }
       }
       failFast true
@@ -4011,8 +4011,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"'''
     stage('L2: Megatron T5 Eval') {
       when {
         anyOf {
-          branch 'r1.13.0'
-          changeRequest target: 'r1.13.0'
+          branch'main'
+          changeRequest target:'main'
         }
       }
       failFast true
@@ -4028,8 +4028,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"'''
     stage('L2: Megatron BART Pretraining and Resume Training, TP=2') {
       when {
         anyOf {
-          branch 'r1.13.0'
-          changeRequest target: 'r1.13.0'
+          branch'main'
+          changeRequest target:'main'
         }
       }
       failFast true
@@ -4097,8 +4097,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"'''
     stage('L2: Megatron BART Pretraining and Resume Training, PP=2') {
       when {
         anyOf {
-          branch 'r1.13.0'
-          changeRequest target: 'r1.13.0'
+          branch'main'
+          changeRequest target:'main'
         }
       }
       failFast true
@@ -4170,8 +4170,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"'''
     stage('L2: Megatron T5 GLUE/XNLI Finetuning') {
       when {
         anyOf {
-          branch 'r1.13.0'
-          changeRequest target: 'r1.13.0'
+          branch'main'
+          changeRequest target:'main'
         }
       }
       failFast true
@@ -4243,8 +4243,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"'''
     stage('L2: TTS Fast dev runs 1') {
       when {
         anyOf {
-          branch 'r1.13.0'
-          changeRequest target: 'r1.13.0'
+          branch'main'
+          changeRequest target:'main'
         }
       }
       parallel {
@@ -4389,8 +4389,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"'''
     stage('L??: Speech Checkpoints tests') {
       when {
         anyOf {
-          branch 'r1.13.0'
-          changeRequest target: 'r1.13.0'
+          branch'main'
+          changeRequest target:'main'
        }
      }
      failFast true

From 0cfa92986d7e777b0aab52547929d09c42989e3d Mon Sep 17 00:00:00 2001
From: Evgeniy Shabalin
Date: Tue, 13 Dec 2022 10:39:40 -0800
Subject: [PATCH 221/244] fixed issues

Signed-off-by: Evgeniy Shabalin
---
 .github/workflows/import-test.yml |   2 +-
 Jenkinsfile                       | 308 +++++++++++++++---------
 requirements/requirements.txt     |   1 -
 3 files changed, 155 insertions(+), 156 deletions(-)
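The guard rewrites above are the same bulk search-and-replace repeated for every stage, and the replacement dropped the space between each condition and its argument ("branch'main'", "changeRequest target:'main'"). Patch 221, which begins below, restores the conventional spacing. For reference, this is the corrected declarative guard the patch converges on; the stage name here is hypothetical, everything else mirrors the hunks in this document:

    stage('L2: Example stage') {        // hypothetical stage name
      when {
        anyOf {
          branch 'main'                 // Groovy method call: branch('main')
          changeRequest target: 'main'  // named parameter: changeRequest(target: 'main')
        }
      }
      failFast true
    }

Beyond the spacing fix, the first hunk of patch 221 pins the import-test workflow container back to pytorch/pytorch:1.11.0-cuda11.3-cudnn8-runtime, and its last hunk drops a duplicated setuptools==59.5.0 line from requirements/requirements.txt.

diff --git a/.github/workflows/import-test.yml b/.github/workflows/import-test.yml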
index d4662bb454ba..5fc34347710d 100644 --- a/.github/workflows/import-test.yml +++ b/.github/workflows/import-test.yml @@ -12,7 +12,7 @@ jobs: # Check https://hub.docker.com/r/pytorch/pytorch/tags for latest tags container: - image: pytorch/pytorch:1.12.1-cuda11.3-cudnn8-runtime + image: pytorch/pytorch:1.11.0-cuda11.3-cudnn8-runtime steps: - uses: actions/checkout@v2 diff --git a/Jenkinsfile b/Jenkinsfile index 95c7d6f58e28..80e1ebb251eb 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -93,8 +93,8 @@ pipeline { stage('L0: Unit Tests CPU') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } steps { @@ -106,8 +106,8 @@ pipeline { stage('L0: TN/ITN Tests CPU') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -139,8 +139,8 @@ pipeline { stage('L2: NeMo text processing') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -191,8 +191,8 @@ pipeline { stage('L2: ASR dev run') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -277,8 +277,8 @@ pipeline { stage('L2: ASR dev run - part two') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -307,8 +307,8 @@ pipeline { stage('L2: Speech to Text EMA') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'main' + changeRequest target: 'main' } } steps { @@ -328,8 +328,8 @@ pipeline { stage('L2: Speaker dev run') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -450,8 +450,8 @@ pipeline { // stage('L2: ASR DALI dev run') { // when { // anyOf { - // branch'main' - // changeRequest target:'main' + // branch 'main' + // changeRequest target: 'main' // } // } // failFast true @@ -518,8 +518,8 @@ pipeline { // stage('L2: ASR RNNT dev run') { // when { // anyOf { - // branch'main' - // changeRequest target:'main' + // branch 'main' + // changeRequest target: 'main' // } // } // failFast true @@ -580,8 +580,8 @@ pipeline { // stage('L2: Hybrid ASR RNNT-CTC dev run') { // when { // anyOf { - // branch 'main' - // changeRequest target: 'main' + // branch 'main' + // changeRequest target: 'main' // } // } // failFast true @@ -610,8 +610,8 @@ pipeline { stage('L2: ASR Multi-dataloader dev run') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -658,8 +658,8 @@ pipeline { stage('L2: ASR Adapters') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -703,8 +703,8 @@ pipeline { stage('L2: Megatron T5 Adapter PP=2') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -747,8 +747,8 @@ pipeline { stage('L2: Megatron T5 Adapter TP=2') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -789,8 +789,8 @@ pipeline { stage('L2: Megatron T5 IA3 PP=2') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -833,8 +833,8 @@ pipeline { stage('L2: Megatron T5 IA3 TP=2') { when { anyOf { - branch'main' - changeRequest 
target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -875,8 +875,8 @@ pipeline { stage('L2: Megatron GPT Adapter TP=2') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -916,8 +916,8 @@ pipeline { stage('L2: Megatron GPT Adapter PP=2') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -958,8 +958,8 @@ pipeline { stage('L2: Speech Transcription') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -980,8 +980,8 @@ pipeline { stage('L2: Segmentation Tool') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } stages { @@ -1036,8 +1036,8 @@ pipeline { stage('L2: G2P Models') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -1117,8 +1117,8 @@ pipeline { // stage('L2: Multi-GPU Megatron finetuning') { // when { // anyOf { - // branch'main' - // changeRequest target:'main' + // branch 'main' + // changeRequest target: 'main' // } // } // failFast true @@ -1144,8 +1144,8 @@ pipeline { stage('L2: STS-b') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -1204,8 +1204,8 @@ pipeline { stage('L2: Dialogue Classification') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -1375,8 +1375,8 @@ pipeline { stage('L2: Dialogue Generation') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -1441,8 +1441,8 @@ pipeline { // stage('L2: Dialogue Generation Part 2') { // when { // anyOf { -// branch'main' -// changeRequest target:'main' +// branch 'main' +// changeRequest target: 'main' // } // } // failFast true @@ -1471,8 +1471,8 @@ pipeline { stage('L2: COPY') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -1501,8 +1501,8 @@ pipeline { stage('L2: Duplex Text Normalization') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -1539,8 +1539,8 @@ pipeline { // stage('L2: MegaBERT Token Classification') { // when { // anyOf { - // branch'main' - // changeRequest target:'main' + // branch 'main' + // changeRequest target: 'main' // } // } // failFast true @@ -1565,8 +1565,8 @@ pipeline { stage('L2: BERT Text Classification') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -1594,8 +1594,8 @@ pipeline { stage('L2: Parallel BERT Question-Answering SQUAD v1.1 & v2.0') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -1653,8 +1653,8 @@ pipeline { stage('L2: Parallel BART Question-Answering SQUAD v1.1 & v2.0') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -1714,8 +1714,8 @@ pipeline { stage('L2: Parallel GPT2 Question-Answering SQUAD v1.1 & v2.0') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -1775,8 
+1775,8 @@ pipeline { stage('L2: Intent and Slot Classification Tasks') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -1815,8 +1815,8 @@ pipeline { // stage('L2: Model Parallel Size 2 Megatron Text Classification') { // when { // anyOf{ - // branch'main' - // changeRequest target:'main' + // branch 'main' + // changeRequest target: 'main' // } // } // failFast true @@ -1844,8 +1844,8 @@ pipeline { // stage('L2: Model Parallel Size 2 Megatron Autoresume') { // when { // anyOf{ - // branch'main' - // changeRequest target:'main' + // branch 'main' + // changeRequest target: 'main' // } // } // failFast true @@ -1875,8 +1875,8 @@ pipeline { // stage('L2: Model Parallel Size 2 Megatron Evaluation from .nemo') { // when { // anyOf{ - // branch'main' - // changeRequest target:'main' + // branch 'main' + // changeRequest target: 'main' // } // } // failFast true @@ -1896,8 +1896,8 @@ pipeline { // stage('L2: Model Parallel Size 2 Megatron Train from .nemo') { // when { // anyOf{ - // branch'main' - // changeRequest target:'main' + // branch 'main' + // changeRequest target: 'main' // } // } // failFast true @@ -1919,8 +1919,8 @@ pipeline { stage('L2: Parallel NLP Examples 2') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -2044,8 +2044,8 @@ pipeline { stage('Punctuation & Capitalization tarred dataset') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -2103,8 +2103,8 @@ pipeline { stage('Punctuation & Capitalization, Different ways of passing labels to model') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -2211,8 +2211,8 @@ pipeline { stage('Punctuation & Capitalization inference') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -2237,8 +2237,8 @@ pipeline { stage('L2: Parallel Pretraining BERT pretraining from Text/Preprocessed') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -2299,8 +2299,8 @@ pipeline { stage('L2: Entity Linking') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -2327,8 +2327,8 @@ pipeline { stage('L2: NMT Attention is All You Need Training') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -2450,8 +2450,8 @@ pipeline { stage('L2: NMT Attention is All You Need Inference') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -2486,8 +2486,8 @@ pipeline { stage('L2: NMT Attention is All You Need Finetuning') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -2520,8 +2520,8 @@ pipeline { stage('L2: NMT with HuggingFace') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -2597,8 +2597,8 @@ pipeline { stage('L2: NMT Tarred Dataset Creation') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -2651,8 +2651,8 @@ pipeline { 
stage('L2: Megatron NMT Training TP=2') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -2746,8 +2746,8 @@ pipeline { // stage('L2: NMT Bottleneck Fallback') { // when { // anyOf { - // branch'main' - // changeRequest target:'main' + // branch 'main' + // changeRequest target: 'main' // } // } // failFast true @@ -2793,8 +2793,8 @@ pipeline { // stage('L2: NMT Bottleneck Architecture') { // when { // anyOf { - // branch'main' - // changeRequest target:'main' + // branch 'main' + // changeRequest target: 'main' // } // } // failFast true @@ -2876,8 +2876,8 @@ pipeline { // stage('L2: NMT Bottleneck LVM') { // when { // anyOf { - // branch'main' - // changeRequest target:'main' + // branch 'main' + // changeRequest target: 'main' // } // } // failFast true @@ -2959,8 +2959,8 @@ pipeline { stage('L2: Megatron Bert Pretraining and Resume Training with Pipeline Paralleism') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -3029,8 +3029,8 @@ pipeline { stage('L2: Megatron Bert Pretraining and Resume Training') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -3100,8 +3100,8 @@ pipeline { stage('L2: Megatron RETRO Pretraining and Resume Training') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -3172,8 +3172,8 @@ pipeline { stage('L2: Megatron RETRO muTransfer Pretraining Performance') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -3255,8 +3255,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: BioMegatron Bert NER Task') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -3273,8 +3273,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron GPT Pretraining and Resume Training TP=2') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -3345,8 +3345,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron GPT Pretraining and Resume Training PP=2') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -3417,8 +3417,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron GPT Eval') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -3434,8 +3434,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron GPT Eval PP2') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -3452,8 +3452,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron GPT Prompt Tuning') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -3483,8 +3483,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron GPT Prompt Learning') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 
'main' + changeRequest target: 'main' } } failFast true @@ -3556,8 +3556,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' // stage('L2: Megatron GPT Convert from Megatron-LM checkpoing and Eval') { // when { // anyOf { - // branch'main' - // changeRequest target:'main' + // branch 'main' + // changeRequest target: 'main' // } // } // failFast true @@ -3583,8 +3583,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron Change Partitions') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -3622,8 +3622,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron T5 Pretraining and Resume Training TP=2') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -3718,8 +3718,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron T5 Pretraining and Resume Training PP=2') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -3788,8 +3788,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron T5 w/ Mixture of Expert Pretraining') { when { anyOf { - branch 'main' - changeRequest target: 'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -3832,8 +3832,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron T5 Prompt Learning') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -3931,8 +3931,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron UL2 Pretraining and Resume Training TP=2') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -4011,8 +4011,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron T5 Eval') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -4028,8 +4028,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron BART Pretraining and Resume Training, TP=2') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -4097,8 +4097,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron BART Pretraining and Resume Training, PP=2') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -4170,8 +4170,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: Megatron T5 GLUE/XNLI Finetuning') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } failFast true @@ -4243,8 +4243,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L2: TTS Fast dev runs 1') { when { anyOf { - branch'main' - changeRequest target:'main' + branch 'main' + changeRequest target: 'main' } } parallel { @@ -4389,8 +4389,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"''' stage('L??: Speech Checkpoints tests') { when { anyOf { - branch'main' - changeRequest target:'main' 
+          branch 'main'
+          changeRequest target: 'main'
         }
       }
       failFast true
diff --git a/requirements/requirements.txt b/requirements/requirements.txt
index 2964747ec2b6..aad836335461 100644
--- a/requirements/requirements.txt
+++ b/requirements/requirements.txt
@@ -6,7 +6,6 @@ python-dateutil
 ruamel.yaml
 scikit-learn
 setuptools==59.5.0
-setuptools==59.5.0
 tensorboard
 text-unidecode
 torch

From d2ac6ed3b75ea8f6ceeb3633ccf8da9d8441b745 Mon Sep 17 00:00:00 2001
From: Evgeniy Shabalin
Date: Wed, 14 Dec 2022 00:22:22 -0800
Subject: [PATCH 222/244] fixed more issues

---
 Jenkinsfile                                   |   22 +-
 nemo/collections/tts/models/vits.py           |    1 +
 nemo/package_info.py                          |    2 +-
 tutorials/01_NeMo_Models.ipynb                |  514 ++++----
 tutorials/02_NeMo_Adapters.ipynb              |    6 +-
 tutorials/AudioTranslationSample.ipynb        |    2 +-
 ...blish_NeMo_Model_On_Hugging_Face_Hub.ipynb |  484 +++----
 tutorials/VoiceSwapSample.ipynb               |    2 +-
 .../asr/ASR_CTC_Language_Finetuning.ipynb     |  546 ++++----
 tutorials/asr/ASR_for_telephony_speech.ipynb  |    2 +-
 tutorials/asr/ASR_with_NeMo.ipynb             |  216 +--
 .../asr/ASR_with_Subword_Tokenization.ipynb   | 1162 ++++++++---------
 ..._Transducer_Inference_with_LCS_Merge.ipynb |  526 ++++----
 tutorials/nlp/MegatronBert_export.ipynb       |    2 +-
 .../nlp/Multitask_Prompt_and_PTuning.ipynb    |    2 +-
 tutorials/nlp/Question_Answering_Squad.ipynb  |  725 ----------
 .../Token_Classification-BioMegatron.ipynb    |    2 +-
 .../nlp/Zero_Shot_Intent_Recognition.ipynb    |    2 +-
 .../ITN_with_Thutmose_Tagger.ipynb            |    2 +-
 .../Text_(Inverse)_Normalization.ipynb        |  934 ++++++-------
 20 files changed, 2217 insertions(+), 2937 deletions(-)
 delete mode 100755 tutorials/nlp/Question_Answering_Squad.ipynb

diff --git a/Jenkinsfile b/Jenkinsfile
index 80e1ebb251eb..75e07f653d17 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -307,8 +307,8 @@ pipeline {
     stage('L2: Speech to Text EMA') {
       when {
         anyOf {
-          branch 'main'
-          changeRequest target: 'main'
+          branch 'main'
+          changeRequest target: 'main'
         }
       }
       steps {
@@ -580,8 +580,8 @@ pipeline {
 //  stage('L2: Hybrid ASR RNNT-CTC dev run') {
 //    when {
 //      anyOf {
-//        branch 'main'
-//        changeRequest target: 'main'
+//        branch 'main'
+//        changeRequest target: 'main'
 //      }
 //    }
 //    failFast true
@@ -2959,8 +2959,8 @@ pipeline {
     stage('L2: Megatron Bert Pretraining and Resume Training with Pipeline Paralleism') {
       when {
         anyOf {
-          branch 'main'
-          changeRequest target: 'main'
+          branch 'main'
+          changeRequest target: 'main'
         }
       }
       failFast true
@@ -3452,8 +3452,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"'''
     stage('L2: Megatron GPT Prompt Tuning') {
       when {
         anyOf {
-          branch 'main'
-          changeRequest target: 'main'
+          branch 'main'
+          changeRequest target: 'main'
         }
       }
       failFast true
@@ -3788,8 +3788,8 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"'''
     stage('L2: Megatron T5 w/ Mixture of Expert Pretraining') {
       when {
         anyOf {
-          branch 'main'
-          changeRequest target: 'main'
+          branch 'main'
+          changeRequest target: 'main'
         }
       }
       failFast true
@@ -4411,4 +4411,4 @@ assert_frame_equal(training_curve, gt_curve, rtol=1e-3, atol=1e-3)"'''
       cleanWs()
     }
   }
-}
+}
\ No newline at end of file
diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py
index 906524c29c13..0c20ae65ad48 100644
--- a/nemo/collections/tts/models/vits.py
+++ b/nemo/collections/tts/models/vits.py
@@ -42,6 +42,7 @@
     HAVE_WANDB = False


+@experimental
 class VitsModel(TextToWaveform):
     def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None):
         # Convert to Hydra 1.0 compatible DictConfig
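The Jenkinsfile hunks in patch 222 differ only in leading whitespace: they re-align guards that the earlier patches left unevenly indented. Two substantive changes ride along. First, VitsModel is tagged with +@experimental; the matching import sits outside the quoted context, and in NeMo it would conventionally come from nemo.utils.decorators (an assumption here, not a line in this patch). Second, the nemo/package_info.py hunk that follows flips PRE_RELEASE to 'rc0'. A minimal sketch of how these two pieces fit together, with the version join reconstructed from the "(major, minor, patch, pre-release)" comment in the next hunk:

    # Sketch under stated assumptions: the decorator import path and the
    # __version__ join are reconstructed from NeMo conventions, not quoted
    # from this patch.
    from nemo.utils.decorators import experimental

    @experimental  # warns at use that the class API is unstable and may change
    class VitsModel:
        ...

    MAJOR, MINOR, PATCH, PRE_RELEASE = 1, 14, 0, 'rc0'
    __version__ = f"{MAJOR}.{MINOR}.{PATCH}{PRE_RELEASE}"  # -> '1.14.0rc0'

The rest of the patch is mechanical: the tutorial notebooks listed in the diffstat are re-serialized so that nbformat and metadata come first and each cell lists source before execution_count and outputs, several files lose their trailing newline, and tutorials/nlp/Question_Answering_Squad.ipynb is deleted outright.

diff --git a/nemo/package_info.py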
b/nemo/package_info.py index 3570f1ff1f6f..071179db59c3 100644 --- a/nemo/package_info.py +++ b/nemo/package_info.py @@ -16,7 +16,7 @@ MAJOR = 1 MINOR = 14 PATCH = 0 -PRE_RELEASE = '' +PRE_RELEASE = 'rc0' # Use the following formatting: (major, minor, patch, pre-release) VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE) diff --git a/tutorials/01_NeMo_Models.ipynb b/tutorials/01_NeMo_Models.ipynb index df4491ff15f9..6f230e62c1a3 100644 --- a/tutorials/01_NeMo_Models.ipynb +++ b/tutorials/01_NeMo_Models.ipynb @@ -1,12 +1,24 @@ { + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "01_NeMo_Models.ipynb", + "provenance": [], + "collapsed_sections": [], + "toc_visible": true + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + } + }, "cells": [ { "cell_type": "code", - "execution_count": null, "metadata": { "id": "ASnx4b5jXsil" }, - "outputs": [], "source": [ "\"\"\"\n", "You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.\n", @@ -33,7 +45,9 @@ "\n", "## Grab the config we'll use in this example\n", "!mkdir configs" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -160,17 +174,17 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "piLOgwOPX1FS" }, - "outputs": [], "source": [ "import torch\n", "import nemo\n", "from nemo.core import NeuralModule\n", "from nemo.core import typecheck" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -194,29 +208,29 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "bseLiNoqqQrE" }, - "outputs": [], "source": [ "class MyEmptyModule(NeuralModule):\n", "\n", " def forward(self):\n", " print(\"Neural Module ~ hello world!\")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "j4Q36L5urdOQ" }, - "outputs": [], "source": [ "x = MyEmptyModule()\n", "x()" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -247,33 +261,33 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "ZvC57bbxwXxN" }, - "outputs": [], "source": [ "# Case 1:\n", "embedding = torch.nn.Embedding(num_embeddings=10, embedding_dim=30)\n", "x = torch.randint(high=10, size=(1, 5))\n", "print(\"x :\", x)\n", "print(\"embedding(x) :\", embedding(x).shape)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "sMaqhMBgxe2C" }, - "outputs": [], "source": [ "# Case 2\n", "lstm = torch.nn.LSTM(1, 30, batch_first=True)\n", "x = torch.randn(1, 5, 1)\n", "print(\"x :\", x)\n", "print(\"lstm(x) :\", lstm(x)[0].shape) # Let's take all timestep outputs of the LSTM" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -326,23 +340,21 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "yp0FG8NJt1Jd" }, - "outputs": [], "source": [ "from nemo.core.neural_types import NeuralType\n", "from nemo.core.neural_types import *" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "3tsgs8Fp0-WV" }, - "outputs": [], "source": [ "class EmbeddingModule(NeuralModule):\n", " def __init__(self):\n", @@ -364,7 +376,9 @@ " return {\n", " 'y': NeuralType(axes=('B', 'T', 'C'), elements_type=EmbeddedTextType())\n", " }" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": 
"markdown", @@ -428,14 +442,14 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "boxxMniv27vi" }, - "outputs": [], "source": [ "embedding_module = EmbeddingModule()" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -448,11 +462,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "SZZOOoCJ2-iV" }, - "outputs": [], "source": [ "class LSTMModule(NeuralModule):\n", " def __init__(self):\n", @@ -474,7 +486,9 @@ " return {\n", " 'y': NeuralType(axes=('B', 'T', 'C'), elements_type=EncodedRepresentation())\n", " }" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -492,14 +506,14 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "6LlOJf0C8GN4" }, - "outputs": [], "source": [ "lstm_module = LSTMModule()" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -513,17 +527,17 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "giLJlub78-Ja" }, - "outputs": [], "source": [ "# Case 1 [ERROR CELL]\n", "x1 = torch.randint(high=10, size=(1, 5))\n", "print(\"x :\", x1)\n", "print(\"embedding(x) :\", embedding_module(x1).shape)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -539,16 +553,16 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "2KUj_p6M9L-f" }, - "outputs": [], "source": [ "# Case 1\n", "print(\"x :\", x1)\n", "print(\"embedding(x) :\", embedding_module(x=x1).shape)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -561,17 +575,17 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "FMu3B0-9-CqE" }, - "outputs": [], "source": [ "# Case 2 [ERROR CELL]\n", "x2 = torch.randn(1, 5, 1) # Input = [B=1, T=5, C=1]\n", "print(\"x :\", x2)\n", "print(\"lstm(x) :\", lstm_module(x=x2)[0].shape) # Let's take all timestep outputs of the LSTM" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -597,11 +611,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "q2u-keAM-d-B" }, - "outputs": [], "source": [ "class CorrectLSTMModule(LSTMModule): # Let's inherit the wrong class to make it easy to override\n", " @property\n", @@ -610,7 +622,9 @@ " 'y': NeuralType(axes=('B', 'T', 'C'), elements_type=EncodedRepresentation()),\n", " 'h_c': [NeuralType(axes=('D', 'B', 'C'), elements_type=EncodedRepresentation())],\n", " }" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -627,22 +641,20 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "GyPZH-fz_dG4" }, - "outputs": [], "source": [ "lstm_module = CorrectLSTMModule()" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "9whH50PE_Xyx" }, - "outputs": [], "source": [ "# Case 2\n", "x2 = torch.randn(1, 5, 1)\n", @@ -651,7 +663,9 @@ "print(\"lstm(x) :\", y2.shape) # The output of the LSTM RNN\n", "print(\"hidden state (h) :\", h.shape) # The first hidden state of the LSTM RNN\n", "print(\"hidden state (c) :\", c.shape) # The second hidden state of the LSTM RNN" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -669,30 +683,30 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "bGQ9XbWU_ffa" }, - "outputs": [], "source": [ "emb_out = embedding_module(x=x1)\n", "lstm_out = lstm_module(x=x2)[0]\n", "\n", 
"assert hasattr(emb_out, 'neural_type')\n", "assert hasattr(lstm_out, 'neural_type')" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "kEpBruSOScPJ" }, - "outputs": [], "source": [ "print(\"Embedding tensor :\", emb_out.neural_type)\n", "print(\"LSTM tensor :\", lstm_out.neural_type)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -710,25 +724,25 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "8AU9FMtdATIm" }, - "outputs": [], "source": [ "emb_out.neural_type.compare(lstm_out.neural_type)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "2cqnqAGIBCjA" }, - "outputs": [], "source": [ "emb_out.neural_type == lstm_out.neural_type" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -761,11 +775,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "AGbKB4gJEzcU" }, - "outputs": [], "source": [ "embedding_module = EmbeddingModule()\n", "x1 = torch.randint(high=10, size=(1, 5))\n", @@ -774,21 +786,23 @@ "x1.neural_type = NeuralType(('B', 'T'), Index())\n", "\n", "print(\"embedding(x) :\", embedding_module(x=x1).shape)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "F0j-evylFM5j" }, - "outputs": [], "source": [ "# Attach wrong neural type [ERROR CELL]\n", "x1.neural_type = NeuralType(('B', 'T'), LabelsType())\n", "\n", "print(\"embedding(x) :\", embedding_module(x=x1).shape)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -805,11 +819,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "raFkuSRaBAE0" }, - "outputs": [], "source": [ "import math\n", "from typing import List, Set, Dict, Tuple, Optional\n", @@ -817,7 +829,9 @@ "import torch\n", "import torch.nn as nn\n", "from torch.nn import functional as F" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -834,11 +848,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "ybhLLVyUF0mo" }, - "outputs": [], "source": [ "class AttentionType(EncodedRepresentation):\n", " \"\"\"Basic Attention Element Type\"\"\"\n", @@ -848,7 +860,9 @@ "\n", "class CausalSelfAttentionType(SelfAttentionType):\n", " \"\"\"Causal Self Attention Element Type\"\"\"" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -865,11 +879,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "w4oXpAL_CoDp" }, - "outputs": [], "source": [ "class CausalSelfAttention(nn.Module):\n", " \"\"\"\n", @@ -934,7 +946,9 @@ " x = x + self.attn(self.ln1(x))\n", " x = x + self.mlp(self.ln2(x))\n", " return x" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -966,16 +980,16 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "0TsfmCYthMux" }, - "outputs": [], "source": [ "import pytorch_lightning as ptl\n", "from nemo.core import ModelPT\n", "from omegaconf import OmegaConf" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -991,11 +1005,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "98x9-Fh-HVwj" }, - "outputs": [], "source": [ "class PTLGPT(ptl.LightningModule):\n", " def __init__(self,\n", @@ -1065,7 +1077,9 @@ " elif isinstance(module, 
nn.LayerNorm):\n", " module.bias.data.zero_()\n", " module.weight.data.fill_(1.0)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1079,14 +1093,14 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "rrXIBzg4wutC" }, - "outputs": [], "source": [ "m = PTLGPT(vocab_size=100, block_size=32, n_layer=1, n_embd=32, n_head=4)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1138,11 +1152,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "uYwMyjqK05RL" }, - "outputs": [], "source": [ "class GPTEmbedding(NeuralModule):\n", " def __init__(self, vocab_size: int, n_embd: int, block_size: int, embd_pdrop: float = 0.0):\n", @@ -1174,7 +1186,9 @@ " return {\n", " 'embeddings': NeuralType(('B', 'T', 'C'), EmbeddedTextType())\n", " }" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1203,11 +1217,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "1QeQnQ_G2PwH" }, - "outputs": [], "source": [ "class GPTTransformerEncoder(NeuralModule):\n", " def __init__(self, n_embd: int, block_size: int, n_head: int, n_layer: int, attn_pdrop: float = 0.0, resid_pdrop: float = 0.0):\n", @@ -1231,7 +1243,9 @@ " return {\n", " 'encoding': NeuralType(('B', 'T', 'C'), CausalSelfAttentionType())\n", " }" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1254,11 +1268,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "VCPUu0EWQIBX" }, - "outputs": [], "source": [ "class GPTDecoder(NeuralModule):\n", " def __init__(self, n_embd: int, vocab_size: int):\n", @@ -1283,7 +1295,9 @@ " return {\n", " 'logits': NeuralType(('B', 'T', 'C'), LogitsType())\n", " }\n" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1300,11 +1314,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "ZQlmtYU6iDwi" }, - "outputs": [], "source": [ "class AbstractNeMoGPT(ModelPT):\n", " def __init__(self, cfg: OmegaConf, trainer: ptl.Trainer = None):\n", @@ -1363,7 +1375,9 @@ " return {\n", " 'logits': NeuralType(('B', 'T', 'C'), LogitsType())\n", " }" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1382,11 +1396,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "uygo0BEYjKuj" }, - "outputs": [], "source": [ "# model definition args (required)\n", "# ================================\n", @@ -1401,7 +1413,9 @@ "# embd_pdrop: float = 0.1, # \\in [0,1]: amount of dropout on input embeddings\n", "# resid_pdrop: float = 0.1, # \\in [0,1]: amount of dropout in each residual connection\n", "# attn_pdrop: float = 0.1, # \\in [0,1]: amount of dropout on the attention matrix" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1417,27 +1431,27 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "XqLSZq7Soo2j" }, - "outputs": [], "source": [ "from omegaconf import MISSING" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "JTH-1vu8TO7o" }, - "outputs": [], "source": [ "# Let's create a utility for building the class path\n", "def get_class_path(cls):\n", " return f'{cls.__module__}.{cls.__name__}'" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1452,11 +1466,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": 
{ "id": "ZCvLdOlMVLy_" }, - "outputs": [], "source": [ "common_config = OmegaConf.create({\n", " 'vocab_size': MISSING,\n", @@ -1465,7 +1477,9 @@ " 'n_embd': MISSING,\n", " 'n_head': MISSING,\n", "})" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1496,11 +1510,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "ntsxQKH0pDac" }, - "outputs": [], "source": [ "embedding_config = OmegaConf.create({\n", " '_target_': get_class_path(GPTEmbedding),\n", @@ -1526,7 +1538,9 @@ " 'n_embd': '${model.n_embd}',\n", " 'vocab_size': '${model.vocab_size}'\n", "})" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1577,11 +1591,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "c8hvNeB_aDgi" }, - "outputs": [], "source": [ "model_config = OmegaConf.create({\n", " 'model': common_config\n", @@ -1591,7 +1603,9 @@ "model_config.model.embedding = embedding_config\n", "model_config.model.encoder = encoder_config\n", "model_config.model.decoder = decoder_config" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1605,14 +1619,14 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "2SyKNgp9pG0N" }, - "outputs": [], "source": [ "print(OmegaConf.to_yaml(model_config))" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1628,22 +1642,20 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "0X4C76JyOAnN" }, - "outputs": [], "source": [ "import copy" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "ugxA0TPtbHVZ" }, - "outputs": [], "source": [ "temp_config = copy.deepcopy(model_config)\n", "temp_config.model.vocab_size = 10\n", @@ -1654,7 +1666,9 @@ "\n", "temp_config = OmegaConf.create(OmegaConf.to_container(temp_config, resolve=True))\n", "print(OmegaConf.to_yaml(temp_config))" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1668,23 +1682,21 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "IIIVi2IfpsJ4" }, - "outputs": [], "source": [ "# Let's work on a copy of the model config and update it before we send it into the Model.\n", "cfg = copy.deepcopy(model_config)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "OllBhswPqQXq" }, - "outputs": [], "source": [ "# Let's set the values of the config (for some plausible small model)\n", "cfg.model.vocab_size = 100\n", @@ -1692,30 +1704,32 @@ "cfg.model.n_layer = 1\n", "cfg.model.n_embd = 32\n", "cfg.model.n_head = 4" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "QJm2LnTqqcIM" }, - "outputs": [], "source": [ "print(OmegaConf.to_yaml(cfg))" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "E7tpB8BcqeBO" }, - "outputs": [], "source": [ "# Try to create a model with this config [ERROR CELL]\n", "m = AbstractNeMoGPT(cfg.model)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1745,22 +1759,20 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "Vcwi1lO7t7Sm" }, - "outputs": [], "source": [ "from nemo.core.classes.common import PretrainedModelInfo" - ] + ], + "execution_count": null, + "outputs": [] }, { 
"cell_type": "code", - "execution_count": null, "metadata": { "id": "ckCxyVLYqrz0" }, - "outputs": [], "source": [ "class BasicNeMoGPT(AbstractNeMoGPT):\n", "\n", @@ -1776,7 +1788,9 @@ " \n", " def setup_test_data(self, test_data_config: OmegaConf):\n", " self._test_dl = None" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1790,14 +1804,14 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "G8iYQSC5vptU" }, - "outputs": [], "source": [ "m = BasicNeMoGPT(cfg.model)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1822,11 +1836,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "QU3oQAVovxRg" }, - "outputs": [], "source": [ "class BasicNeMoGPTWithSteps(BasicNeMoGPT):\n", "\n", @@ -1856,18 +1868,20 @@ " def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0):\n", " test_loss_mean = torch.stack([x['test_loss'] for x in outputs]).mean()\n", " return {'test_loss': test_loss_mean}" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "2Ki3kRxag511" }, - "outputs": [], "source": [ "m = BasicNeMoGPTWithSteps(cfg=cfg.model)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1918,11 +1932,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "FgXkZQiVjnOv" }, - "outputs": [], "source": [ "class BasicNeMoGPTWithOptim(BasicNeMoGPTWithSteps):\n", "\n", @@ -1971,18 +1983,20 @@ " ]\n", " optimizer = torch.optim.AdamW(optim_groups, lr=self.cfg.optim.lr, betas=self.cfg.optim.betas)\n", " return optimizer\n" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "kARDwthakEQk" }, - "outputs": [], "source": [ "m = BasicNeMoGPTWithOptim(cfg=cfg.model)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1996,11 +2010,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "5K7zh9Cn2s2u" }, - "outputs": [], "source": [ "OmegaConf.set_struct(cfg.model, False)\n", "\n", @@ -2013,7 +2025,9 @@ "cfg.model.optim = optim_config\n", "\n", "OmegaConf.set_struct(cfg.model, True)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -2052,24 +2066,22 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "E-fswFkig9t4" }, - "outputs": [], "source": [ "from nemo.core import Dataset\n", "from torch.utils import data\n", "from torch.utils.data.dataloader import DataLoader" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "-Z8XuPeClGNm" }, - "outputs": [], "source": [ "class TinyShakespeareDataset(Dataset):\n", "\n", @@ -2124,7 +2136,9 @@ " 'input': NeuralType(('B', 'T'), Index()),\n", " 'target': NeuralType(('B', 'T'), LabelsType())\n", " }" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -2154,50 +2168,50 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "VwsdXtVzo--t" }, - "outputs": [], "source": [ "import os" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "QvKcDCvIl9-A" }, - "outputs": [], "source": [ "if not os.path.exists('tiny-shakespeare.txt'):\n", " !wget https://raw.githubusercontent.com/jcjohnson/torch-rnn/master/data/tiny-shakespeare.txt" - ] + ], + 
"execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "ynCwqDu6vK8P" }, - "outputs": [], "source": [ "!head -n 5 tiny-shakespeare.txt" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "bfRL4t9_oS4C" }, - "outputs": [], "source": [ "train_dataset = TinyShakespeareDataset('tiny-shakespeare.txt', cfg.model.block_size, crop=(0, int(1e6)))\n", "val_dataset = TinyShakespeareDataset('tiny-shakespeare.txt', cfg.model.block_size, crop=(int(1e6), int(50e3)), override_vocab=train_dataset.vocab)\n", "test_dataset = TinyShakespeareDataset('tiny-shakespeare.txt', cfg.model.block_size, crop=(int(1.05e6), int(100e3)), override_vocab=train_dataset.vocab)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -2216,11 +2230,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "SVSfIk_-rMSg" }, - "outputs": [], "source": [ "class NeMoGPT(BasicNeMoGPTWithOptim):\n", "\n", @@ -2258,7 +2270,9 @@ " \n", " def setup_test_data(self, test_data_config: OmegaConf):\n", " self._test_dl = self._setup_data_loader(test_data_config)\n" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -2273,11 +2287,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "C6zcTqJixOOL" }, - "outputs": [], "source": [ "OmegaConf.set_struct(cfg.model, False)\n", "\n", @@ -2286,15 +2298,15 @@ "cfg.model.vocab_size = train_dataset.vocab_size\n", "\n", "OmegaConf.set_struct(cfg.model, True)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "zlvThf7BysyT" }, - "outputs": [], "source": [ "train_ds = OmegaConf.create({\n", " 'data_path': '${model.data_path}',\n", @@ -2319,15 +2331,15 @@ " 'batch_size': 4,\n", " 'shuffle': False,\n", "})" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "QVVzR6WKyMT5" }, - "outputs": [], "source": [ "# Attach to the model config\n", "OmegaConf.set_struct(cfg.model, False)\n", @@ -2337,31 +2349,33 @@ "cfg.model.test_ds = test_ds\n", "\n", "OmegaConf.set_struct(cfg.model, True)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "nd_9_mxS0ET-" }, - "outputs": [], "source": [ "# Let's see the config now !\n", "print(OmegaConf.to_yaml(cfg))" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "dlwSQENU0JxA" }, - "outputs": [], "source": [ "# Let's try creating a model now !\n", "model = NeMoGPT(cfg=cfg.model)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -2396,11 +2410,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "johk6Z0e0WEm" }, - "outputs": [], "source": [ "if torch.cuda.is_available():\n", " accelerator = 'gpu'\n", @@ -2408,18 +2420,20 @@ " accelerator = 'cpu'\n", "\n", "trainer = ptl.Trainer(devices=1, accelerator=accelerator, limit_test_batches=1.0)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "oqeeofEr1S8e" }, - "outputs": [], "source": [ "trainer.test(model)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -2436,48 +2450,48 @@ }, { "cell_type": "code", - "execution_count": null, 
"metadata": { "id": "DksG_-7G1Vbe" }, - "outputs": [], "source": [ "model.save_to('gpt_model.nemo')" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "JhjoFdCnBWVh" }, - "outputs": [], "source": [ "!ls -d -- *.nemo" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "567txSF0BYXN" }, - "outputs": [], "source": [ "temp_model = NeMoGPT.restore_from('gpt_model.nemo')" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "YvnfG0kxBfTt" }, - "outputs": [], "source": [ "# [ERROR CELL]\n", "temp_model.setup_test_data(temp_model.cfg.test_ds)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -2496,11 +2510,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "_Atyoc4NBjEV" }, - "outputs": [], "source": [ "class NeMoGPTv2(NeMoGPT):\n", " \n", @@ -2540,61 +2552,61 @@ " self.vocab = vocab\n", "\n", " self._test_dl = self._setup_data_loader(test_data_config)\n" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "mn09jsRZDusN" }, - "outputs": [], "source": [ "# Let's try creating a model now !\n", "model = NeMoGPTv2(cfg=cfg.model)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "sQPIPySDD1K0" }, - "outputs": [], "source": [ "# Now let's try to save and restore !\n", "model.save_to('gpt_model.nemo')" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "0YwCJ4xaJ3bU" }, - "outputs": [], "source": [ "temp_model = NeMoGPTv2.restore_from('gpt_model.nemo')" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "tcxwDIIWKKCQ" }, - "outputs": [], "source": [ "temp_model.setup_multiple_test_data(temp_model.cfg.test_ds)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "j3Olm6ZTKRbO" }, - "outputs": [], "source": [ "if torch.cuda.is_available():\n", " accelerator = 'gpu'\n", @@ -2602,18 +2614,20 @@ " accelerator = 'cpu'\n", "\n", "trainer = ptl.Trainer(devices=1, accelerator=accelerator, limit_test_batches =1.0)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "_QE2SngCKV2p" }, - "outputs": [], "source": [ "trainer.test(model)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -2627,26 +2641,14 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "ZjCV5u3_OO7a" }, - "outputs": [], - "source": [] - } - ], - "metadata": { - "colab": { - "collapsed_sections": [], - "name": "01_NeMo_Models.ipynb", - "provenance": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3", - "name": "python3" + "source": [ + "" + ], + "execution_count": null, + "outputs": [] } - }, - "nbformat": 4, - "nbformat_minor": 0 -} + ] +} \ No newline at end of file diff --git a/tutorials/02_NeMo_Adapters.ipynb b/tutorials/02_NeMo_Adapters.ipynb index 273c2fa9b7b2..75942c6bf4af 100644 --- a/tutorials/02_NeMo_Adapters.ipynb +++ b/tutorials/02_NeMo_Adapters.ipynb @@ -1657,7 +1657,9 @@ "id": "iz2wF3cd-6MF" }, "outputs": [], - "source": [] + "source": [ 
+ "" + ] } ], "metadata": { @@ -1676,4 +1678,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} +} \ No newline at end of file diff --git a/tutorials/AudioTranslationSample.ipynb b/tutorials/AudioTranslationSample.ipynb index 524e0d31d1e2..c4fec16c4181 100644 --- a/tutorials/AudioTranslationSample.ipynb +++ b/tutorials/AudioTranslationSample.ipynb @@ -284,4 +284,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} +} \ No newline at end of file diff --git a/tutorials/Publish_NeMo_Model_On_Hugging_Face_Hub.ipynb b/tutorials/Publish_NeMo_Model_On_Hugging_Face_Hub.ipynb index a5b62079e441..1b951e7b9e8c 100644 --- a/tutorials/Publish_NeMo_Model_On_Hugging_Face_Hub.ipynb +++ b/tutorials/Publish_NeMo_Model_On_Hugging_Face_Hub.ipynb @@ -1,4 +1,20 @@ { + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "Publish_NeMo_Model_On_Hugging_Face_Hub.ipynb", + "provenance": [], + "collapsed_sections": [] + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "language_info": { + "name": "python" + } + }, "cells": [ { "cell_type": "code", @@ -31,22 +47,19 @@ }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "J6d04-VRjC-O" - }, - "outputs": [], "source": [ "### Install Hugging Face Hub\n", "!python -m pip install huggingface_hub\n", "!python -m pip install evaluate" - ] + ], + "metadata": { + "id": "J6d04-VRjC-O" + }, + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", - "metadata": { - "id": "aS-Y5O_oGBTc" - }, "source": [ "# NeMo models on Hugging Face Hub\n", "\n", @@ -55,103 +68,103 @@ "This enables community members to share their NeMo models (any model!) with all users of NeMo!\n", "\n", "**Note**: While in this tutorial we showcase an ASR model, there is no particular restriction to any domain - all NeMo models (.nemo files) of every domain can be uploaded and shared in the same way." 
-      ]
+      ],
+      "metadata": {
+        "id": "aS-Y5O_oGBTc"
+      }
    },
    {
      "cell_type": "markdown",
-      "metadata": {
-        "id": "Us3UlvwCiEZi"
-      },
      "source": [
        "# Login to Hugging Face\n",
        "\n",
        "Use the notebook login, and access your user access token (or create one to upload models to Hugging Face).\n",
        "\n",
        "For more information, visit the User Access Token section - https://huggingface.co/docs/hub/security-tokens"
-      ]
+      ],
+      "metadata": {
+        "id": "Us3UlvwCiEZi"
+      }
    },
    {
      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "id": "4RTYbCLziEnb"
-      },
-      "outputs": [],
      "source": [
        "from huggingface_hub import notebook_login\n",
        "\n",
        "notebook_login()"
-      ]
+      ],
+      "metadata": {
+        "id": "4RTYbCLziEnb"
+      },
+      "execution_count": null,
+      "outputs": []
    },
    {
      "cell_type": "code",
-      "execution_count": null,
+      "source": [
+        "!git config --global credential.helper store"
+      ],
      "metadata": {
        "id": "dgZbTPcFiaml"
      },
-      "outputs": [],
-      "source": [
-        "!git config --global credential.helper store"
-      ]
+      "execution_count": null,
+      "outputs": []
    },
    {
      "cell_type": "markdown",
-      "metadata": {
-        "id": "s-FiNn1eiFAl"
-      },
      "source": [
        "# Prepare a model to upload to HF\n",
        "\n",
        "In this example, we will download a NeMo ASR model from NGC and then upload it to Hugging Face for simplicity and to showcase the method.\n",
        "\n",
        "**You can swap out this ASR model for any model that you restore via `restore_from()` and follow the same steps to upload your own models !**"
-      ]
+      ],
+      "metadata": {
+        "id": "s-FiNn1eiFAl"
+      }
    },
    {
      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "id": "5KnVl-M0ax14"
-      },
-      "outputs": [],
      "source": [
        "import torch\n",
        "import torch.nn as nn\n",
        "\n",
        "from omegaconf import DictConfig, OmegaConf, open_dict"
-      ]
+      ],
+      "metadata": {
+        "id": "5KnVl-M0ax14"
+      },
+      "execution_count": null,
+      "outputs": []
    },
    {
      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "id": "ZEDpkIinbwmm"
-      },
-      "outputs": [],
      "source": [
        "import nemo.collections.asr as nemo_asr # use any domain's models !\n",
        "import nemo.collections.nlp as nemo_nlp # use any domain's models !\n",
        "import nemo.collections.tts as nemo_tts # use any domain's models !"
-      ]
+      ],
+      "metadata": {
+        "id": "ZEDpkIinbwmm"
+      },
+      "execution_count": null,
+      "outputs": []
    },
    {
      "cell_type": "markdown",
-      "metadata": {
-        "id": "mLuQo1vnHVcP"
-      },
      "source": [
        "# Model Name\n",
        "\n",
        "NeMo adheres to strict requirements when naming a model for upload to NGC / Hugging Face Hub. \n",
        "\n",
        "It is **mandatory** to use the same model name across the model card and the NeMo file itself. Otherwise, the NeMo model from Hugging Face will fail to restore correctly."
-      ]
+      ],
+      "metadata": {
+        "id": "mLuQo1vnHVcP"
+      }
    },
    {
      "cell_type": "markdown",
-      "metadata": {
-        "id": "MRO2f9fhHywJ"
-      },
      "source": [
        "## Naming Convention\n",
        "\n",
@@ -179,132 +192,130 @@
        "As an example, the model we will try today follows this convention: \n",
        "\n",
        "`{task name}_{language id}_{model identifier}_[OPTIONAL modifiers]` = `stt_en_conformer_ctc_small`"
-      ]
+      ],
+      "metadata": {
+        "id": "MRO2f9fhHywJ"
+      }
    },
    {
      "cell_type": "markdown",
-      "metadata": {
-        "id": "BjLstKWnPzWV"
-      },
      "source": [
        "**Set the MODEL_NAME carefully** !"
- ] + ], + "metadata": { + "id": "BjLstKWnPzWV" + } }, { "cell_type": "code", - "execution_count": null, + "source": [ + "MODEL_NAME = \"stt_en_conformer_ctc_small\"" + ], "metadata": { "id": "UzHjXDbckU0M" }, - "outputs": [], - "source": [ - "MODEL_NAME = \"stt_en_conformer_ctc_small\"" - ] + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", - "metadata": { - "id": "qibj1RwvKjSQ" - }, "source": [ "-----\n", "**Restore a NeMo Model**\n", "\n", "Here, we restore a model from NGC directly, but you can restore a model from your training runs using `restore_from()` or use a local .nemo file." - ] + ], + "metadata": { + "id": "qibj1RwvKjSQ" + } }, { "cell_type": "code", - "execution_count": null, + "source": [ + "model = nemo_asr.models.ASRModel.from_pretrained(MODEL_NAME)" + ], "metadata": { "id": "MsC3pE65d_z2" }, - "outputs": [], - "source": [ - "model = nemo_asr.models.ASRModel.from_pretrained(MODEL_NAME)" - ] + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", - "metadata": { - "id": "y1AkXPFVKfC2" - }, "source": [ "# Create a Hugging Face Model\n", "\n", "Now that we have a NeMo model and have logged into Hugging Face with our user API key, we can begin by creating a new repository and uploading our model." - ] + ], + "metadata": { + "id": "y1AkXPFVKfC2" + } }, { "cell_type": "markdown", - "metadata": { - "id": "iv17qFG7KzlL" - }, "source": [ "-----\n", "\n", "After the model has been restored, create an HfApi object to interact with the model repository." - ] + ], + "metadata": { + "id": "iv17qFG7KzlL" + } }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "aJUXCOTjKy-2" - }, - "outputs": [], "source": [ "from huggingface_hub import HfApi\n", "api = HfApi()\n", "username = api.whoami()['name']" - ] + ], + "metadata": { + "id": "aJUXCOTjKy-2" + }, + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "DKRlMeaEkeAH" - }, - "outputs": [], "source": [ "try:\n", " api.create_repo(repo_id=MODEL_NAME)\n", " print(\"Successfully created repository !\")\n", "except Exception as e:\n", " print(\"Repository is possibly already created. Refer to error here - \\n\\n\", e)" - ] + ], + "metadata": { + "id": "DKRlMeaEkeAH" + }, + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, + "source": [ + "from huggingface_hub import Repository" + ], "metadata": { "id": "N2-deSyTlCdS" }, - "outputs": [], - "source": [ - "from huggingface_hub import Repository" - ] + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", - "metadata": { - "id": "aTa4RqDYLGMI" - }, "source": [ "Note two essential names - \n", "\n", "- `hf_model_name`: A string name that is the composite of your `username` and `MODEL_NAME` as set above. This name is used for multiple purposes, so keep track of it.\n", "\n", "- `model_filename`: The actual filename of the NeMo model that will be uploaded to Hugging Face. Note that this filename is explicitly set to `{MODEL_NAME}.nemo`. If this model filename is altered, then the model cannot correctly be restored by NeMo when downloaded from Hugging Face Hub, so please be careful." 
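,
        "\n",
        "As a concrete sketch (the username below is hypothetical - yours comes from `api.whoami()['name']`), with the `MODEL_NAME` chosen above these two names resolve to:\n",
        "\n",
        "- `hf_model_name` = `my-hf-user/stt_en_conformer_ctc_small`\n",
        "- `model_filename` = `stt_en_conformer_ctc_small.nemo`"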
-      ]
+      ],
+      "metadata": {
+        "id": "aTa4RqDYLGMI"
+      }
    },
    {
      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "id": "xhTTMNpBskMS"
-      },
-      "outputs": [],
      "source": [
        "local_dir = f'model-{MODEL_NAME}/'\n",
        "hf_model_name = f'{username}/{MODEL_NAME}'\n",
@@ -314,60 +325,62 @@
        "\n",
        "with Repository(local_dir=local_dir, clone_from=hf_model_name, repo_type='model').commit(commit_message):\n",
        "    model.save_to(model_filename)"
-      ]
+      ],
+      "metadata": {
+        "id": "xhTTMNpBskMS"
+      },
+      "execution_count": null,
+      "outputs": []
    },
    {
      "cell_type": "code",
-      "execution_count": null,
+      "source": [
+        "print(\"Finished uploading model to :\", hf_model_name)"
+      ],
      "metadata": {
        "id": "BhvNp8MYvxLi"
      },
-      "outputs": [],
-      "source": [
-        "print(\"Finished uploading model to :\", hf_model_name)"
-      ]
+      "execution_count": null,
+      "outputs": []
    },
    {
      "cell_type": "markdown",
-      "metadata": {
-        "id": "Qrs-MlW9vVbH"
-      },
      "source": [
        "## Test if the model works \n",
        "\n",
        "Now that we uploaded the model, let's try to use it in NeMo !\n",
        "\n",
        "The only change compared to normally calling `from_pretrained(model_name)` is to call **`from_pretrained({username}/{filename})`** instead."
-      ]
+      ],
+      "metadata": {
+        "id": "Qrs-MlW9vVbH"
+      }
    },
    {
      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "id": "NyuyyRv5snkr"
-      },
-      "outputs": [],
      "source": [
        "hf_model_name = f'{username}/{MODEL_NAME}'\n",
        "hf_model = nemo_asr.models.ASRModel.from_pretrained(hf_model_name)"
-      ]
+      ],
+      "metadata": {
+        "id": "NyuyyRv5snkr"
+      },
+      "execution_count": null,
+      "outputs": []
    },
    {
      "cell_type": "code",
-      "execution_count": null,
+      "source": [
+        "print(\"Successfully used HF model -\", hf_model_name)"
+      ],
      "metadata": {
        "id": "Yhi922WVv4G_"
      },
-      "outputs": [],
-      "source": [
-        "print(\"Successfully used HF model -\", hf_model_name)"
-      ]
+      "execution_count": null,
+      "outputs": []
    },
    {
      "cell_type": "markdown",
-      "metadata": {
-        "id": "9gG1ElJywEJT"
-      },
      "source": [
        "# Model Card\n",
        "\n",
@@ -376,40 +389,38 @@
        "The next step is to update the model card to have some helpful information regarding the uploaded model and its scores compared to other models.\n",
        "\n",
        "You can do this in two ways: manually (by clicking the link below), or programmatically filling in part of the model card by following the instructions below."
-      ]
+      ],
+      "metadata": {
+        "id": "9gG1ElJywEJT"
+      }
    },
    {
      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "id": "aZJRKoxhwBLr"
-      },
-      "outputs": [],
      "source": [
        "hf_url = f'https://huggingface.co/{username}/{MODEL_NAME}'\n",
        "print(f\"Visit {hf_url} to manually edit your model card\")"
-      ]
+      ],
+      "metadata": {
+        "id": "aZJRKoxhwBLr"
+      },
+      "execution_count": null,
+      "outputs": []
    },
    {
      "cell_type": "markdown",
-      "metadata": {
-        "id": "ZlA4hNq6w4rH"
-      },
      "source": [
        "-----\n",
        "\n",
        "Here, we are going to set up some variables for our model card.\n",
        "\n",
        "First up are the tags:"
-      ]
+      ],
+      "metadata": {
+        "id": "ZlA4hNq6w4rH"
+      }
    },
    {
      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "id": "QxKtPynWyUWX"
-      },
-      "outputs": [],
      "source": [
        "TAGS = [\n",
        "  \"automatic-speech-recognition\", # Task id, refer to https://github.com/huggingface/datasets/blob/master/src/datasets/utils/resources/tasks.json for allowed values.\n",
@@ -422,13 +433,15 @@
        "  \"pytorch\", # required, for toolkit identification\n",
        "  # \"hf-asr-leaderboard\", # Should only be used if model is evaluated on benchmark scores for ASR.\n",
        "]"
-      ]
+      ],
+      "metadata": {
+        "id": "QxKtPynWyUWX"
+      },
+      "execution_count": null,
+      "outputs": []
    },
    {
      "cell_type": "markdown",
-      "metadata": {
-        "id": "Fh7rYWEMM0Vz"
-      },
      "source": [
        "-----\n",
        "\n",
@@ -437,15 +450,13 @@
        "By convention, check whether the dataset already exists on Hugging Face Datasets - it is usually listed at the top and in lower case.\n",
        "\n",
        "If you train on datasets that don't yet exist in Hugging Face Datasets, you can still add them, but try to differentiate them by using capitalized names."
-      ]
+      ],
+      "metadata": {
+        "id": "Fh7rYWEMM0Vz"
+      }
    },
    {
      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "id": "qy-5aDAgzuGD"
-      },
-      "outputs": [],
      "source": [
        "# Replace all spaces with `-`\n",
        "DATASETS = [\n",
@@ -462,26 +473,26 @@
        "  \"Europarl-ASR-(EN)\",\n",
        "  \"Multilingual-LibriSpeech-(2000-hours)\",\n",
        "]"
-      ]
+      ],
+      "metadata": {
+        "id": "qy-5aDAgzuGD"
+      },
+      "execution_count": null,
+      "outputs": []
    },
    {
      "cell_type": "markdown",
-      "metadata": {
-        "id": "_0w1X_z4NN5-"
-      },
      "source": [
        "-----\n",
        "\n",
        "Now we create an automated template based on a config for the top portion of the readme file.\n",
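        "\n",
        "As a rough sketch, the YAML header printed by the cells below should look something like this (the exact fields mirror the dataclass defined next, and the values depend on your choices above):\n",
        "\n",
        "```yaml\n",
        "language:\n",
        "- en\n",
        "license: cc-by-4.0\n",
        "datasets:\n",
        "- librispeech_asr\n",
        "tags:\n",
        "- automatic-speech-recognition\n",
        "model_index:\n",
        "- name: stt_en_conformer_ctc_small\n",
        "  results: []\n",
        "```"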
-      ]
+      ],
+      "metadata": {
+        "id": "_0w1X_z4NN5-"
+      }
    },
    {
      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "id": "O88WFyPJwjJD"
-      },
-      "outputs": [],
      "source": [
        "from dataclasses import dataclass, field\n",
        "from typing import List, Optional, Dict, Any\n",
@@ -496,15 +507,15 @@
        "  thumbnail: Optional[str] = None\n",
        "  tags: List[str] = field(default_factory=lambda: TAGS)\n",
        "  model_index: Any = field(default_factory=lambda: [dict(name=MODEL_NAME, results=[])])"
-      ]
+      ],
+      "metadata": {
+        "id": "O88WFyPJwjJD"
+      },
+      "execution_count": null,
+      "outputs": []
    },
    {
      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "id": "BpInrBdNxxZ3"
-      },
-      "outputs": [],
      "source": [
        "config = NeMoHuggingFaceModelConfig(language=['en'], license=\"cc-by-4.0\") # choose appropriate license here\n",
        "config = OmegaConf.structured(config)\n",
@@ -519,28 +530,28 @@
        "  config['datasets'] = OmegaConf.create(normalized_datasets)\n",
        "\n",
        "print(OmegaConf.to_yaml(config))"
-      ]
+      ],
+      "metadata": {
+        "id": "BpInrBdNxxZ3"
+      },
+      "execution_count": null,
+      "outputs": []
    },
    {
      "cell_type": "markdown",
-      "metadata": {
-        "id": "0TECX8QrC6FY"
-      },
      "source": [
        "## Markdown Template\n",
        "\n",
        "Now that we have an auto-generated header for our readme, next, we write down some template markdown for the actual contents of the model card.\n",
        "\n",
        "You can edit the code here directly if you want, or if you prefer the GUI to see the actual changes in real-time, you can finish uploading this model card and then edit the readme file on the Hugging Face webpage itself."
-      ]
+      ],
+      "metadata": {
+        "id": "0TECX8QrC6FY"
+      }
    },
    {
      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "id": "SSmm7_OiC9Ex"
-      },
-      "outputs": [],
      "source": [
        "hf_model_name = f'{username}/{MODEL_NAME}'\n",
        "\n",
@@ -626,28 +637,28 @@
        "[1] [NVIDIA NeMo Toolkit](https://github.com/NVIDIA/NeMo)\n",
        "\n",
        "\"\"\""
-      ]
+      ],
+      "metadata": {
+        "id": "SSmm7_OiC9Ex"
+      },
+      "execution_count": null,
+      "outputs": []
    },
    {
      "cell_type": "markdown",
-      "metadata": {
-        "id": "KPa53S_5NzNp"
-      },
      "source": [
        "-----\n",
        "\n",
        "Below, we will upload this model card in a temporary file called **`\"readme_template.md\"`**. This is done to prevent overwriting the \"final\" model card that the user may have manually edited.\n",
        "\n",
        "Once this step is finished, **please copy the contents of this file, create a README.md file and paste the contents into it**."
-      ]
+      ],
+      "metadata": {
+        "id": "KPa53S_5NzNp"
+      }
    },
    {
      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "id": "0vk5KK4gzpSU"
-      },
-      "outputs": [],
      "source": [
        "local_dir = f'model-{MODEL_NAME}/'\n",
        "hf_model_name = f'{username}/{MODEL_NAME}'\n",
@@ -662,75 +673,75 @@
        "  f.write(\"\\n---\\n\\n\")\n",
        "  f.write(TEMPLATE)\n",
        "  "
-      ]
+      ],
+      "metadata": {
+        "id": "0vk5KK4gzpSU"
+      },
+      "execution_count": null,
+      "outputs": []
    },
    {
      "cell_type": "markdown",
-      "metadata": {
-        "id": "dfXoihCQmWDa"
-      },
      "source": [
        "-----\n",
        "\n",
        "Please visit the URL below to copy the contents of the `readme_template.md` file into your `README.md` file."
-      ]
+      ],
+      "metadata": {
+        "id": "dfXoihCQmWDa"
+      }
    },
    {
      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "id": "but-5LuLTHFd"
-      },
-      "outputs": [],
      "source": [
        "hf_url = f'https://huggingface.co/{username}/{MODEL_NAME}'\n",
        "print(f\"Visit {hf_url} to edit your model card from the generated template file `{filename}`\")"
-      ]
+      ],
+      "metadata": {
+        "id": "but-5LuLTHFd"
+      },
+      "execution_count": null,
+      "outputs": []
    },
    {
      "cell_type": "markdown",
-      "metadata": {
-        "id": "5vPEnlE62dGU"
-      },
      "source": [
        "## Evaluation Results\n",
        "\n",
        "Now that we have both the model checkpoint and the readme uploaded to the Hub, we can optionally add some evaluation results to the card as well!\n",
        "\n",
        "While this next section is optional, it is highly encouraged!"
-      ]
+      ],
+      "metadata": {
+        "id": "5vPEnlE62dGU"
+      }
    },
    {
      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "id": "rkXMtapA0YzH"
-      },
-      "outputs": [],
      "source": [
        "import evaluate\n",
        "# evaluate.list_evaluation_modules(module_type='metric', with_details=True)"
-      ]
+      ],
+      "metadata": {
+        "id": "rkXMtapA0YzH"
+      },
+      "execution_count": null,
+      "outputs": []
    },
    {
      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "id": "50rzG9Qb3yLR"
-      },
-      "outputs": [],
      "source": [
        "# Uncomment in order to see what values you can supply to the `evaluate` library to push to the Hub.\n",
        "# help(evaluate.push_to_hub)"
-      ]
+      ],
+      "metadata": {
+        "id": "50rzG9Qb3yLR"
+      },
+      "execution_count": null,
+      "outputs": []
    },
    {
      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {
-        "id": "5A4g3SGf4d0V"
-      },
-      "outputs": [],
      "source": [
        "hf_model_name = f'{username}/{MODEL_NAME}'\n",
        "metric_value = 8.1 # value obtained from https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/stt_en_conformer_ctc_small \n",
@@ -748,36 +759,25 @@
        "  # the actual score obtained by the model\n",
        "  metric_value=metric_value,\n",
        ")"
-      ]
+      ],
+      "metadata": {
+        "id": "5A4g3SGf4d0V"
+      },
+      "execution_count": null,
+      "outputs": []
    },
    {
      "cell_type": "markdown",
-      "metadata": {
-        "id": "f3YYa7liO_m3"
-      },
      "source": [
        "-----\n",
        "\n",
        "Done! Now we have a model checkpoint, a model card as well as evaluation results all set up for the NeMo model on Hugging Face!\n",
        "\n",
        "To add more metrics, you can copy-paste the above cell and repeat the procedure for as many metrics as needed!\n",
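        "\n",
        "For instance, here is a hedged sketch of pushing a second, character-level score. Every value below is a placeholder rather than a measured result - substitute your own, and check `help(evaluate.push_to_hub)` for the full set of accepted arguments:\n",
        "\n",
        "```python\n",
        "# Push one more metric entry for the same model (placeholder values).\n",
        "evaluate.push_to_hub(\n",
        "    model_id=hf_model_name,\n",
        "    task_type=\"automatic-speech-recognition\",\n",
        "    dataset_type=\"librispeech_asr\",\n",
        "    dataset_name=\"Librispeech (other)\",\n",
        "    metric_type=\"cer\",\n",
        "    metric_name=\"Test CER\",\n",
        "    metric_value=0.0,  # placeholder - use your evaluated score\n",
        ")\n",
        "```"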
- ] - } - ], - "metadata": { - "colab": { - "collapsed_sections": [], - "name": "Publish_NeMo_Model_On_Hugging_Face_Hub.ipynb", - "provenance": [] - }, - "kernelspec": { - "display_name": "Python 3", - "name": "python3" - }, - "language_info": { - "name": "python" + ], + "metadata": { + "id": "f3YYa7liO_m3" + } } - }, - "nbformat": 4, - "nbformat_minor": 0 -} + ] +} \ No newline at end of file diff --git a/tutorials/VoiceSwapSample.ipynb b/tutorials/VoiceSwapSample.ipynb index 9981deabe2ec..016737f26a9f 100644 --- a/tutorials/VoiceSwapSample.ipynb +++ b/tutorials/VoiceSwapSample.ipynb @@ -329,4 +329,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} +} \ No newline at end of file diff --git a/tutorials/asr/ASR_CTC_Language_Finetuning.ipynb b/tutorials/asr/ASR_CTC_Language_Finetuning.ipynb index e128c578e4f1..aad696e667b9 100644 --- a/tutorials/asr/ASR_CTC_Language_Finetuning.ipynb +++ b/tutorials/asr/ASR_CTC_Language_Finetuning.ipynb @@ -1,12 +1,25 @@ { + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "ASR_CTC_Language_Finetuning.ipynb", + "provenance": [], + "collapsed_sections": [], + "toc_visible": true + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "accelerator": "GPU" + }, "cells": [ { "cell_type": "code", - "execution_count": null, "metadata": { "id": "EGV_ioUHqhun" }, - "outputs": [], "source": [ "\"\"\"\n", "You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.\n", @@ -35,7 +48,9 @@ "that you want to use the \"Run All Cells\" (or similar) option.\n", "\"\"\"\n", "# exit()" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -60,11 +75,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "1cjMaek4rY8-" }, - "outputs": [], "source": [ "import os\n", "import glob\n", @@ -73,15 +86,15 @@ "import wget\n", "import copy\n", "from omegaconf import OmegaConf, open_dict" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "8wqTRjpNruZD" }, - "outputs": [], "source": [ "data_dir = 'datasets/'\n", "\n", @@ -90,21 +103,23 @@ "\n", "if not os.path.exists(\"scripts\"):\n", " os.makedirs(\"scripts\")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "TSTb6b5DriWG" }, - "outputs": [], "source": [ "import nemo\n", "import nemo.collections.asr as nemo_asr\n", "from nemo.collections.asr.metrics.wer import word_error_rate\n", "from nemo.utils import logging, exp_manager" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -123,52 +138,52 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "27h1i8qa7WFE" }, - "outputs": [], "source": [ "if not os.path.exists(\"scripts/get_commonvoice_data.py\"):\n", " !wget -P scripts/ https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/scripts/dataset_processing/get_commonvoice_data.py" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "x0i8hvt688hc" }, - "outputs": [], "source": [ "VERSION = \"cv-corpus-6.1-2020-12-11\"\n", "LANGUAGE = \"ja\"" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "-wI16qY_misb" }, - "outputs": [], "source": [ "tokenizer_dir = os.path.join('tokenizers', LANGUAGE)\n", "manifest_dir = os.path.join('manifests', LANGUAGE)" - ] 
+ ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "bvOT_La2NNw1" }, - "outputs": [], "source": [ "# If something goes wrong during data processing, un-comment the following line to delete the cached dataset \n", "# !rm -rf datasets/$LANGUAGE\n", "!mkdir -p datasets" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -181,11 +196,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "Inwx4OE97guu" }, - "outputs": [], "source": [ "!python scripts/get_commonvoice_data.py \\\n", " --data_root \"datasets/$LANGUAGE/\" \\\n", @@ -195,7 +208,9 @@ " --version=$VERSION \\\n", " --language=$LANGUAGE \\\n", " --files_to_process 'train.tsv' 'dev.tsv' 'test.tsv'" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -208,16 +223,16 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "j7WAGLX59C26" }, - "outputs": [], "source": [ "train_manifest = f\"{manifest_dir}/commonvoice_train_manifest.json\"\n", "dev_manifest = f\"{manifest_dir}/commonvoice_dev_manifest.json\"\n", "test_manifest = f\"{manifest_dir}/commonvoice_test_manifest.json\"" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -247,11 +262,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "EdkJYxUirp7C" }, - "outputs": [], "source": [ "# Manifest Utils\n", "from tqdm.auto import tqdm\n", @@ -279,20 +292,22 @@ " f.write(f\"{datum}\\n\")\n", " print(f\"Finished writing manifest: {filepath}\")\n", " return filepath" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "HngfzcwOijy4" }, - "outputs": [], "source": [ "train_manifest_data = read_manifest(train_manifest)\n", "dev_manifest_data = read_manifest(dev_manifest)\n", "test_manifest_data = read_manifest(test_manifest)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -305,16 +320,16 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "T2iwnvhXimfG" }, - "outputs": [], "source": [ "train_text = [data['text'] for data in train_manifest_data]\n", "dev_text = [data['text'] for data in dev_manifest_data]\n", "test_text = [data['text'] for data in test_manifest_data]" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -329,11 +344,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "XpUb_pI5imhh" }, - "outputs": [], "source": [ "from collections import defaultdict\n", "\n", @@ -344,20 +357,22 @@ " for character in text:\n", " charset[character] += 1\n", " return charset" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "obcPlrOJimju" }, - "outputs": [], "source": [ "train_charset = get_charset(train_manifest_data)\n", "dev_charset = get_charset(dev_manifest_data)\n", "test_charset = get_charset(test_manifest_data)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -370,27 +385,27 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "Z8QVdph6imlz" }, - "outputs": [], "source": [ "train_dev_set = set.union(set(train_charset.keys()), set(dev_charset.keys()))\n", "test_set = set(test_charset.keys())" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "NgCfETWNimn3" }, 
- "outputs": [], "source": [ "print(f\"Number of tokens in train+dev set : {len(train_dev_set)}\")\n", "print(f\"Number of tokens in test set : {len(test_set)}\")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -431,11 +446,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "KPrBi35Cimqc" }, - "outputs": [], "source": [ "# OOV tokens in test set\n", "train_test_common = set.intersection(train_dev_set, test_set)\n", @@ -443,7 +456,9 @@ "print(f\"Number of OOV tokens in test set : {len(test_oov)}\")\n", "print()\n", "print(test_oov)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -471,11 +486,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "VDDiXCiPimr_" }, - "outputs": [], "source": [ "# Populate dictionary mapping count: list[tokens]\n", "train_counts = defaultdict(list)\n", @@ -486,7 +499,9 @@ "\n", "# Compute sorter order of the count keys\n", "count_keys = sorted(list(train_counts.keys()))" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -499,11 +514,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "TJeVEKvAimwE" }, - "outputs": [], "source": [ "MAX_COUNT = 32\n", "\n", @@ -515,7 +528,9 @@ "\n", " TOKEN_COUNT_X.append(count)\n", " NUM_TOKENS_Y.append(num_tokens)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -528,11 +543,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "rKULANgINqbq" }, - "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", "\n", @@ -541,7 +554,9 @@ "plt.xlabel(\"# of occurances\")\n", "plt.ylabel(\"# of tokens\")\n", "plt.xlim(0, MAX_COUNT);" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -554,11 +569,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "9G6laS0ojV-B" }, - "outputs": [], "source": [ "UNCOMMON_TOKENS_COUNT = 5\n", "\n", @@ -569,7 +582,9 @@ " chars_with_infrequent_occurance.update(set(token_list))\n", "\n", "print(f\"Number of tokens with <= {UNCOMMON_TOKENS_COUNT} occurances : {len(chars_with_infrequent_occurance)}\")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -584,11 +599,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "jnh_pnL2jWAY" }, - "outputs": [], "source": [ "all_tokens = set.union(train_dev_set, test_set)\n", "print(f\"Original train+dev+test vocab size : {len(all_tokens)}\")\n", @@ -596,7 +609,9 @@ "extra_kanji = set(test_oov)\n", "train_token_set = all_tokens - extra_kanji\n", "print(f\"New train vocab size : {len(train_token_set)}\")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -624,40 +639,38 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { - "cellView": "form", - "id": "kaX9WzK15Q6t" + "id": "kaX9WzK15Q6t", + "cellView": "form" }, - "outputs": [], "source": [ "#@title Dakuten normalization\n", "perform_dakuten_normalization = True #@param [\"True\", \"False\"] {type:\"raw\"}\n", "PERFORM_DAKUTEN_NORMALIZATION = bool(perform_dakuten_normalization)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "HiEZVEshOp-y" }, - "outputs": [], "source": [ "import unicodedata\n", "def process_dakuten(text):\n", " normalized_text = unicodedata.normalize('NFD', text)\n", " normalized_text = 
normalized_text.replace(\"\\u3099\", \"\").replace(\"\\u309A\", \"\")\n", " return normalized_text" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "pV4kOgpvjWGg" }, - "outputs": [], "source": [ "if PERFORM_DAKUTEN_NORMALIZATION:\n", " normalized_train_token_set = set()\n", @@ -669,7 +682,9 @@ "else:\n", " normalized_train_token_set = train_token_set\n", " " - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -686,11 +701,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "NN3asqvsrp_S" }, - "outputs": [], "source": [ "# Preprocessing steps\n", "import re\n", @@ -714,7 +727,9 @@ " text = data['text']\n", " data['text'] = process_dakuten(text)\n", " return data" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -729,11 +744,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "mwNtHeHLjqJl" }, - "outputs": [], "source": [ "# Processing pipeline\n", "def apply_preprocessors(manifest, preprocessors):\n", @@ -743,15 +756,15 @@ "\n", " print(\"Finished processing manifest !\")\n", " return manifest" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "xB06YHmDr-Ja" }, - "outputs": [], "source": [ "# List of pre-processing functions\n", "PREPROCESSORS = [\n", @@ -759,15 +772,15 @@ " remove_extra_kanji,\n", " remove_dakuten,\n", "]" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "4lqUvpkrr7bQ" }, - "outputs": [], "source": [ "# Load manifests\n", "train_data = read_manifest(train_manifest)\n", @@ -783,7 +796,9 @@ "train_manifest_cleaned = write_processed_manifest(train_data_processed, train_manifest)\n", "dev_manifest_cleaned = write_processed_manifest(dev_data_processed, dev_manifest)\n", "test_manifest_cleaned = write_processed_manifest(test_data_processed, test_manifest)\n" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -798,11 +813,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "WpHk6HW6O0FW" }, - "outputs": [], "source": [ "train_manifest_data = read_manifest(train_manifest_cleaned)\n", "train_charset = get_charset(train_manifest_data)\n", @@ -811,18 +824,20 @@ "dev_charset = get_charset(dev_manifest_data)\n", "\n", "train_dev_set = set.union(set(train_charset.keys()), set(dev_charset.keys()))" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "R3xkR4_dPd3C" }, - "outputs": [], "source": [ "print(f\"Number of tokens in preprocessed train+dev set : {len(train_dev_set)}\")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -839,14 +854,14 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "DlJmwh-iei77" }, - "outputs": [], "source": [ "char_model = nemo_asr.models.ASRModel.from_pretrained(\"stt_en_quartznet15x5\", map_location='cpu')" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -861,14 +876,14 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "1VU-jfYLei9-" }, - "outputs": [], "source": [ "char_model.change_vocabulary(new_vocabulary=list(train_dev_set))" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -891,16 +906,16 @@ }, { "cell_type": "code", - 
"execution_count": null, "metadata": { "id": "6PPDTaLyejAR" }, - "outputs": [], "source": [ "#@title Freeze Encoder { display-mode: \"form\" }\n", "freeze_encoder = True #@param [\"False\", \"True\"] {type:\"raw\"}\n", "freeze_encoder = bool(freeze_encoder)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -923,11 +938,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "1qiTTgDGejC9" }, - "outputs": [], "source": [ "import torch\n", "import torch.nn as nn\n", @@ -942,15 +955,15 @@ " m.train()\n", " for param in m.parameters():\n", " param.requires_grad_(True)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "9I5dx_GWejFm" }, - "outputs": [], "source": [ "if freeze_encoder:\n", " char_model.encoder.freeze()\n", @@ -959,7 +972,9 @@ "else:\n", " char_model.encoder.unfreeze()\n", " logging.info(\"Model encoder has been un-frozen\")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -989,14 +1004,14 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "TBIy8p0fV7sa" }, - "outputs": [], "source": [ "char_model.cfg.labels = list(train_dev_set)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1009,14 +1024,14 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "pzpByrdfejIA" }, - "outputs": [], "source": [ "cfg = copy.deepcopy(char_model.cfg)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1033,11 +1048,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "KlQ5iGrZejKy" }, - "outputs": [], "source": [ "# Setup train, validation, test configs\n", "with open_dict(cfg): \n", @@ -1058,20 +1071,22 @@ " cfg.validation_ds.num_workers = 8\n", " cfg.validation_ds.pin_memory = True\n", " cfg.validation_ds.trim_silence = True" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "tx9DixV0ejMo" }, - "outputs": [], "source": [ "# setup data loaders with new configs\n", "char_model.setup_training_data(cfg.train_ds)\n", "char_model.setup_multiple_validation_data(cfg.validation_ds)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1089,23 +1104,21 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "MgoD5hOKYSKJ" }, - "outputs": [], "source": [ "# Original optimizer + scheduler\n", "print(OmegaConf.to_yaml(char_model.cfg.optim))" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "okytaslHejOm" }, - "outputs": [], "source": [ "with open_dict(char_model.cfg.optim):\n", " char_model.cfg.optim.lr = 0.01\n", @@ -1114,7 +1127,9 @@ " char_model.cfg.optim.sched.warmup_steps = None # Remove default number of steps of warmup\n", " char_model.cfg.optim.sched.warmup_ratio = 0.05 # 5 % warmup\n", " char_model.cfg.optim.sched.min_lr = 1e-5" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1136,22 +1151,20 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "aJ6Md-dLejRA" }, - "outputs": [], "source": [ "print(OmegaConf.to_yaml(char_model.cfg.spec_augment))" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "3ei9WsLzejTI" }, - "outputs": [], "source": [ "# with 
open_dict(char_model.cfg.spec_augment):\n", "# char_model.cfg.spec_augment.freq_masks = 2\n", @@ -1160,7 +1173,9 @@ "# char_model.cfg.spec_augment.time_width = 0.05\n", "\n", "char_model.spec_augmentation = char_model.from_config_dict(char_model.cfg.spec_augment)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1179,30 +1194,30 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { - "cellView": "form", - "id": "cN1FC0o2ejVg" + "id": "cN1FC0o2ejVg", + "cellView": "form" }, - "outputs": [], "source": [ "#@title Metric\n", "use_cer = True #@param [\"False\", \"True\"] {type:\"raw\"}\n", "log_prediction = True #@param [\"False\", \"True\"] {type:\"raw\"}\n", "\n" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "HURZMpPwejXa" }, - "outputs": [], "source": [ "char_model._wer.use_cer = use_cer\n", "char_model._wer.log_prediction = log_prediction" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1219,11 +1234,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "eaw1qsQIf1Zv" }, - "outputs": [], "source": [ "import torch\n", "import pytorch_lightning as ptl\n", @@ -1249,15 +1262,15 @@ "\n", "# Finally, update the model's internal config\n", "char_model.cfg = char_model._cfg" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "ENSpJJqcf1cG" }, - "outputs": [], "source": [ "# Environment variable generally used for multi-node multi-gpu training.\n", "# In notebook environments, this flag is unnecessary and can cause logs of multiple training runs to overwrite each other.\n", @@ -1277,15 +1290,15 @@ "config = OmegaConf.structured(config)\n", "\n", "logdir = exp_manager.exp_manager(trainer, config)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "ATI2R0D7rylR" }, - "outputs": [], "source": [ "try:\n", " from google import colab\n", @@ -1299,19 +1312,21 @@ " %tensorboard --logdir /content/experiments/lang-$LANGUAGE/ASR-Char-Model-Language-$LANGUAGE/\n", "else:\n", " print(\"To use tensorboard, please use this notebook in a Google Colab environment.\")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "TvaESyJHf1eb" }, - "outputs": [], "source": [ "%%time\n", "trainer.fit(char_model)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1361,27 +1376,27 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "yIUQklly9BPa" }, - "outputs": [], "source": [ "if not os.path.exists(\"scripts/process_asr_text_tokenizer.py\"):\n", " !wget -P scripts/ https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/scripts/tokenizers/process_asr_text_tokenizer.py" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "SKA9rrpbm3nu" }, - "outputs": [], "source": [ "#@title Tokenizer Config { display-mode: \"form\" }\n", "TOKENIZER_TYPE = \"bpe\" #@param [\"bpe\", \"unigram\"]" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1396,15 +1411,15 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "lO_uskUEm2ZG" }, - "outputs": [], "source": [ "# << VOCAB SIZE can be changed to any value larger than (len(train_dev_set) + 2)! 
>>\n", "VOCAB_SIZE = len(train_dev_set) + 2" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1421,11 +1436,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "yT-SBPN2Ox6Y" }, - "outputs": [], "source": [ "!python scripts/process_asr_text_tokenizer.py \\\n", " --manifest=$train_manifest_cleaned,$dev_manifest_cleaned \\\n", @@ -1436,19 +1449,21 @@ " --spe_character_coverage=1.0 \\\n", " --no_lower_case \\\n", " --log" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "G5TxLHtKPW4E" }, - "outputs": [], "source": [ "TOKENIZER_DIR = f\"{tokenizer_dir}/tokenizer_spe_{TOKENIZER_TYPE}_v{VOCAB_SIZE}/\"\n", "print(\"Tokenizer directory :\", TOKENIZER_DIR)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1463,11 +1478,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "8sAz2_RyMu7J" }, - "outputs": [], "source": [ "# Number of tokens in tokenizer - \n", "with open(os.path.join(TOKENIZER_DIR, 'tokenizer.vocab')) as f:\n", @@ -1475,15 +1488,15 @@ "\n", "num_tokens = len(tokens)\n", "print(\"Number of tokens : \", num_tokens)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "zktPYPCxNXNO" }, - "outputs": [], "source": [ "if num_tokens < VOCAB_SIZE:\n", " print(\n", @@ -1491,7 +1504,9 @@ " f\"with vocab size = {VOCAB_SIZE}. Current number of tokens = {num_tokens}. \"\n", " f\"Please reconstruct the tokenizer with fewer tokens\"\n", " )" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1506,14 +1521,14 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "mmSj18iQQTZx" }, - "outputs": [], "source": [ "model = nemo_asr.models.ASRModel.from_pretrained(\"stt_en_citrinet_512\", map_location='cpu')" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1533,15 +1548,15 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "FmFQKwGkoaIx" }, - "outputs": [], "source": [ "# Preserve the decoder parameters in case weight matching can be done later\n", "pretrained_decoder = model.decoder.state_dict()" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1556,14 +1571,14 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "-8SKfYSVorgg" }, - "outputs": [], "source": [ "model.change_vocabulary(new_tokenizer_dir=TOKENIZER_DIR, new_tokenizer_type=\"bpe\")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1580,11 +1595,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "367FBtRDorkT" }, - "outputs": [], "source": [ "# Insert preserved model weights if shapes match\n", "if model.decoder.decoder_layers[0].weight.shape == pretrained_decoder['decoder_layers.0.weight'].shape:\n", @@ -1592,7 +1605,9 @@ " logging.info(\"Decoder shapes matched - restored weights from pre-trained model\")\n", "else:\n", " logging.info(\"\\nDecoder shapes did not match - could not restore decoder weights from pre-trained model.\")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1607,24 +1622,22 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "lfDW0gQVpm4d" }, - "outputs": [], "source": [ "#@title Freeze Encoder { display-mode: \"form\" }\n", "freeze_encoder = True #@param 
[\"False\", \"True\"] {type:\"raw\"}\n", "freeze_encoder = bool(freeze_encoder)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "oLkm96zkplrX" }, - "outputs": [], "source": [ "if freeze_encoder:\n", " model.encoder.freeze()\n", @@ -1633,7 +1646,9 @@ "else:\n", " model.encoder.unfreeze()\n", " logging.info(\"Model encoder has been un-frozen\")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1650,14 +1665,14 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "pBYAd_2-R2r3" }, - "outputs": [], "source": [ "cfg = copy.deepcopy(model.cfg)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1672,11 +1687,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "NfbtgTC-RyzF" }, - "outputs": [], "source": [ "# Setup new tokenizer\n", "cfg.tokenizer.dir = TOKENIZER_DIR\n", @@ -1684,7 +1697,9 @@ "\n", "# Set tokenizer config\n", "model.cfg.tokenizer = cfg.tokenizer" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1701,23 +1716,21 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "wnw-ygClmg7t" }, - "outputs": [], "source": [ "# Setup train/val/test configs\n", "print(OmegaConf.to_yaml(cfg.train_ds))" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "OlOowK7rRAvs" }, - "outputs": [], "source": [ "# Setup train, validation, test configs\n", "with open_dict(cfg):\n", @@ -1744,21 +1757,23 @@ " cfg.test_ds.pin_memory = True\n", " cfg.test_ds.use_start_end_token = True\n", " cfg.test_ds.trim_silence = True" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "y98ZAhBtRtoD" }, - "outputs": [], "source": [ "# setup model with new configs\n", "model.setup_training_data(cfg.train_ds)\n", "model.setup_multiple_validation_data(cfg.validation_ds)\n", "model.setup_multiple_test_data(cfg.test_ds)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1782,11 +1797,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "ozJDj6BktKw-" }, - "outputs": [], "source": [ "def analyse_ctc_failures_in_model(model):\n", " count_ctc_failures = 0\n", @@ -1823,52 +1836,52 @@ " model = model.train()\n", " \n", " return count_ctc_failures, am_seq_lengths, target_seq_lengths" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "hJGUcq2BtKzw" }, - "outputs": [], "source": [ "results = analyse_ctc_failures_in_model(model)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "crEWxvI2tK2S" }, - "outputs": [], "source": [ "num_ctc_failures, am_seq_lengths, target_seq_lengths = results" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "L8M0-mSI1Jp5" }, - "outputs": [], "source": [ "if num_ctc_failures > 0:\n", " logging.warning(f\"\\nCTC loss will fail for {num_ctc_failures} samples ({num_ctc_failures * 100./ float(len(am_seq_lengths))} % of samples)!\\n\"\n", " f\"Increase the vocabulary size of the tokenizer so that this number becomes close to zero !\")\n", "else:\n", " logging.info(\"No CTC failure cases !\")" - ] + ], + "execution_count": null, + 
"outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "00wKre0W1Jsx" }, - "outputs": [], "source": [ "# Compute average ratio of T / U\n", "avg_T = sum(am_seq_lengths) / float(len(am_seq_lengths))\n", @@ -1883,7 +1896,9 @@ "print(f\"Average Target sequence length = {avg_U}\")\n", "print()\n", "print(f\"Ratio of Average AM sequence length to target sequence length = {avg_length_ratio}\")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1898,14 +1913,14 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "sS-xoplxSTJv" }, - "outputs": [], "source": [ "print(OmegaConf.to_yaml(cfg.optim))" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1920,11 +1935,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "Io55nnbdXoeG" }, - "outputs": [], "source": [ "with open_dict(model.cfg.optim):\n", " model.cfg.optim.lr = 0.025\n", @@ -1932,7 +1945,9 @@ " model.cfg.optim.sched.warmup_steps = None # Remove default number of steps of warmup\n", " model.cfg.optim.sched.warmup_ratio = 0.10 # 10 % warmup\n", " model.cfg.optim.sched.min_lr = 1e-9" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1947,11 +1962,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "6Vb35_oRh_sV" }, - "outputs": [], "source": [ "with open_dict(model.cfg.spec_augment):\n", " model.cfg.spec_augment.freq_masks = 2\n", @@ -1960,7 +1973,9 @@ " model.cfg.spec_augment.time_width = 0.05\n", "\n", "model.spec_augmentation = model.from_config_dict(model.cfg.spec_augment)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1975,30 +1990,30 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "cellView": "form", "id": "UfUlPXZS6vlV" }, - "outputs": [], "source": [ "#@title Metric\n", "use_cer = True #@param [\"False\", \"True\"] {type:\"raw\"}\n", "log_prediction = True #@param [\"False\", \"True\"] {type:\"raw\"}\n", "\n" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "6qpbMNZh68p9" }, - "outputs": [], "source": [ "model._wer.use_cer = use_cer\n", "model._wer.log_prediction = log_prediction" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -2015,11 +2030,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "bonpx5sRS07M" }, - "outputs": [], "source": [ "import torch\n", "import pytorch_lightning as ptl\n", @@ -2045,15 +2058,15 @@ "\n", "# finally, update the model's internal config\n", "model.cfg = model._cfg" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "SR4CiViFS8Ww" }, - "outputs": [], "source": [ "from nemo.utils import exp_manager\n", "\n", @@ -2075,15 +2088,15 @@ "config = OmegaConf.structured(config)\n", "\n", "logdir = exp_manager.exp_manager(trainer, config)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "OlvyYwYWTsl6" }, - "outputs": [], "source": [ "try:\n", " from google import colab\n", @@ -2097,19 +2110,21 @@ " %tensorboard --logdir /content/experiments/lang-$LANGUAGE/ASR-Model-Language-$LANGUAGE/\n", "else:\n", " print(\"To use tensorboard, please use this notebook in a Google Colab environment.\")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": 
"code", - "execution_count": null, "metadata": { "id": "6X21Q2qfVLvG" }, - "outputs": [], "source": [ "%%time\n", "trainer.fit(model)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -2124,16 +2139,16 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "DoWNVNYGOaMX" }, - "outputs": [], "source": [ "save_path = f\"Model-{LANGUAGE}.nemo\"\n", "model.save_to(f\"{save_path}\")\n", "print(f\"Model saved at path : {os.getcwd() + os.path.sep + save_path}\")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -2148,20 +2163,5 @@ "While the focus was on a small dataset for Japanese, nearly all of this information can be used for larger datasets and other scenarios where compute is limited, or the model's size prevents fine-tuning the entire model." ] } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "collapsed_sections": [], - "name": "ASR_CTC_Language_Finetuning.ipynb", - "provenance": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3", - "name": "python3" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} + ] +} \ No newline at end of file diff --git a/tutorials/asr/ASR_for_telephony_speech.ipynb b/tutorials/asr/ASR_for_telephony_speech.ipynb index 79ee2d03226f..5be3b50502b3 100644 --- a/tutorials/asr/ASR_for_telephony_speech.ipynb +++ b/tutorials/asr/ASR_for_telephony_speech.ipynb @@ -340,4 +340,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} +} \ No newline at end of file diff --git a/tutorials/asr/ASR_with_NeMo.ipynb b/tutorials/asr/ASR_with_NeMo.ipynb index 4d5ad11b0cdd..519456a012af 100644 --- a/tutorials/asr/ASR_with_NeMo.ipynb +++ b/tutorials/asr/ASR_with_NeMo.ipynb @@ -1,12 +1,38 @@ { + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "accelerator": "GPU", + "colab": { + "name": "ASR_with_NeMo.ipynb", + "provenance": [], + "collapsed_sections": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.7" + } + }, "cells": [ { "cell_type": "code", - "execution_count": null, "metadata": { "id": "lJz6FDU1lRzc" }, - "outputs": [], "source": [ "\"\"\"\n", "You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.\n", @@ -36,7 +62,9 @@ "that you want to use the \"Run All Cells\" (or similar) option.\n", "\"\"\"\n", "# exit()" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -129,11 +157,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "gAhsmi6HlRzh" }, - "outputs": [], "source": [ "import os\n", "# This is where the an4/ directory will be placed.\n", @@ -142,16 +168,16 @@ "\n", "if not os.path.exists(data_dir):\n", " os.makedirs(data_dir)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "Yb4fuUvWlRzk", "scrolled": true }, - "outputs": [], "source": [ "import glob\n", "import os\n", @@ -181,7 +207,9 @@ " cmd = [\"sox\", sph_path, wav_path]\n", " subprocess.run(cmd)\n", "print(\"Finished conversion.\\n******\")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -196,11 +224,9 @@ }, { "cell_type": "code", - "execution_count": 
null, "metadata": { "id": "_M_bSs3MjQlz" }, - "outputs": [], "source": [ "import librosa\n", "import IPython.display as ipd\n", @@ -210,7 +236,9 @@ "audio, sample_rate = librosa.load(example_file)\n", "\n", "ipd.Audio(example_file, rate=sample_rate)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -225,11 +253,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "MqIAKkqelRzm" }, - "outputs": [], "source": [ "%matplotlib inline\n", "import librosa.display\n", @@ -241,7 +267,9 @@ "plt.ylabel('Amplitude')\n", "\n", "_ = librosa.display.waveshow(audio)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -263,11 +291,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "oCFneEs1lRzp" }, - "outputs": [], "source": [ "import numpy as np\n", "\n", @@ -279,7 +305,9 @@ "librosa.display.specshow(spec_db, y_axis='log', x_axis='time')\n", "plt.colorbar()\n", "plt.title('Audio Spectrogram');" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -296,11 +324,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "7yQXVn-TlRzt" }, - "outputs": [], "source": [ "# Plot the mel spectrogram of our sample\n", "mel_spec = librosa.feature.melspectrogram(audio, sr=sample_rate)\n", @@ -310,7 +336,9 @@ " mel_spec_db, x_axis='time', y_axis='mel')\n", "plt.colorbar()\n", "plt.title('Mel Spectrogram');" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -355,18 +383,18 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "4_W0lhaQlRzx" }, - "outputs": [], "source": [ "# NeMo's \"core\" package\n", "import nemo\n", "# NeMo's ASR collection - this collections contains complete ASR models and\n", "# building blocks (modules) for ASR\n", "import nemo.collections.asr as nemo_asr" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -381,15 +409,15 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "KFZZpYult96G" }, - "outputs": [], "source": [ "# This line will download pre-trained QuartzNet15x5 model from NVIDIA's NGC cloud and instantiate it for you\n", "quartznet = nemo_asr.models.EncDecCTCModel.from_pretrained(model_name=\"QuartzNet15x5Base-En\")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -402,16 +430,16 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "3QCpR_93u1hp" }, - "outputs": [], "source": [ "files = [os.path.join(data_dir, 'an4/wav/an4_clstk/mgah/cen2-mgah-b.wav')]\n", "for fname, transcription in zip(files, quartznet.transcribe(paths2audio_files=files)):\n", " print(f\"Audio in {fname} was recognized as: {transcription}\")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -459,11 +487,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "lVB1sG1GlRzz" }, - "outputs": [], "source": [ "# --- Building Manifest Files --- #\n", "import json\n", @@ -510,7 +536,9 @@ " build_manifest(test_transcripts, test_manifest, 'an4/wav/an4test_clstk')\n", " print(\"Test manifest created.\")\n", "print(\"***Done***\")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -546,11 +574,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "PXVKBniMlRz5" }, - "outputs": [], "source": [ "# --- Config Information ---#\n", "try:\n", @@ -569,7 +595,9 @@ "with 
open(config_path) as f:\n", " params = yaml.load(f)\n", "print(params)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -586,15 +614,15 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "GUfR6tAK0k2u" }, - "outputs": [], "source": [ "import pytorch_lightning as pl\n", "trainer = pl.Trainer(devices=1, accelerator='gpu', max_epochs=50)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -608,17 +636,17 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "Cbf0fsMK09lk" }, - "outputs": [], "source": [ "from omegaconf import DictConfig\n", "params['model']['train_ds']['manifest_filepath'] = train_manifest\n", "params['model']['validation_ds']['manifest_filepath'] = test_manifest\n", "first_asr_model = nemo_asr.models.EncDecCTCModel(cfg=DictConfig(params['model']), trainer=trainer)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -631,15 +659,15 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "inRJsnrz1psq" }, - "outputs": [], "source": [ "# Start training!!!\n", "trainer.fit(first_asr_model)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -657,11 +685,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "n_0y3stSXDX_" }, - "outputs": [], "source": [ "try:\n", " from google import colab\n", @@ -675,7 +701,9 @@ " %tensorboard --logdir lightning_logs/\n", "else:\n", " print(\"To use tensorboard, please use this notebook in a Google Colab environment.\")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -688,14 +716,14 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "7kdQbpohXnEd" }, - "outputs": [], "source": [ "print(params['model']['optim'])" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -708,18 +736,18 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "AbigFKUtYgvn" }, - "outputs": [], "source": [ "import copy\n", "new_opt = copy.deepcopy(params['model']['optim'])\n", "new_opt['lr'] = 0.001\n", "first_asr_model.setup_optimization(optim_config=DictConfig(new_opt))\n", "# And then you can invoke trainer.fit(first_asr_model)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -736,11 +764,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "3FT0klSV268p" }, - "outputs": [], "source": [ "paths2audio_files = [os.path.join(data_dir, 'an4/wav/an4_clstk/mgah/cen2-mgah-b.wav'),\n", " os.path.join(data_dir, 'an4/wav/an4_clstk/fmjd/cen7-fmjd-b.wav'),\n", @@ -748,7 +774,9 @@ " os.path.join(data_dir, 'an4/wav/an4_clstk/fkai/cen8-fkai-b.wav')]\n", "print(first_asr_model.transcribe(paths2audio_files=paths2audio_files,\n", " batch_size=4))" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -761,11 +789,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "7mP4r1Gx_Ilt" }, - "outputs": [], "source": [ "# Bigger batch-size = bigger throughput\n", "params['model']['validation_ds']['batch_size'] = 16\n", @@ -804,7 +830,9 @@ "\n", "# We need to sum all numerators and denominators first. 
Then divide.\n", "print(f\"WER = {sum(wer_nums)/sum(wer_denoms)}\")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -836,14 +864,14 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "9glGogaPlR0H" }, - "outputs": [], "source": [ "print(quartznet._cfg['spec_augment'])" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -871,11 +899,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "hl320dsydWX0" }, - "outputs": [], "source": [ "# Check what kind of vocabulary/alphabet the model has right now\n", "print(quartznet.decoder.vocabulary)\n", @@ -888,7 +914,9 @@ " 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', \"'\", \"!\"\n", " ]\n", ")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -901,11 +929,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "_PZJIso-eDl-" }, - "outputs": [], "source": [ "# Use the smaller learning rate we set before\n", "quartznet.setup_optimization(optim_config=DictConfig(new_opt))\n", @@ -919,7 +945,9 @@ "# And now we can create a PyTorch Lightning trainer and call `fit` again.\n", "trainer = pl.Trainer(devices=1, accelerator='gpu', max_epochs=2)\n", "trainer.fit(quartznet)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -964,11 +992,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "I4WRcmakjQnj" }, - "outputs": [], "source": [ "!pip install --upgrade onnxruntime # for gpu, use onnxruntime-gpu\n", "#!mkdir -p ort\n", @@ -980,7 +1006,9 @@ "#!pip uninstall -y onnxruntime-gpu\n", "#!pip install --upgrade --force-reinstall ./build/Linux/Release/dist/onnxruntime*.whl\n", "#%cd .." 
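Before the full ONNX inference cell below, it may help to see the export step in isolation. This is a minimal sketch, assuming the trained `quartznet` model from earlier in this notebook and a working `onnxruntime` install; the file name `qn.onnx` is illustrative, and the actual input names depend on the exported graph, so we print them rather than assume them.

```python
import onnxruntime

# Export the NeMo model to ONNX (NeMo models expose an export() helper).
quartznet.export("qn.onnx")

# Create an inference session; falls back to CPU if no GPU provider is available.
sess = onnxruntime.InferenceSession(
    "qn.onnx", providers=["CUDAExecutionProvider", "CPUExecutionProvider"]
)

# Inspect the graph's expected inputs before feeding real features.
for inp in sess.get_inputs():
    print(inp.name, inp.shape, inp.type)
```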
- ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -993,11 +1021,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "HZnyWxPyjQnm" }, - "outputs": [], "source": [ "import json\n", "import os\n", @@ -1074,7 +1100,9 @@ " hypotheses, _ = wer.decoding.ctc_decoder_predictions_tensor(greedy_predictions)\n", " print(hypotheses)\n", " break\n" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1136,40 +1164,12 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "V3ERGX86lR0V" }, - "outputs": [], - "source": [] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "collapsed_sections": [], - "name": "ASR_with_NeMo.ipynb", - "provenance": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.7" + "source": [], + "execution_count": null, + "outputs": [] } - }, - "nbformat": 4, - "nbformat_minor": 0 -} + ] +} \ No newline at end of file diff --git a/tutorials/asr/ASR_with_Subword_Tokenization.ipynb b/tutorials/asr/ASR_with_Subword_Tokenization.ipynb index 7402931ece05..50e4f4536908 100644 --- a/tutorials/asr/ASR_with_Subword_Tokenization.ipynb +++ b/tutorials/asr/ASR_with_Subword_Tokenization.ipynb @@ -1,12 +1,26 @@ { + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "ASR_with_Subword_Tokenization.ipynb", + "provenance": [], + "collapsed_sections": [], + "toc_visible": true + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3", + "language": "python" + }, + "accelerator": "GPU" + }, "cells": [ { "cell_type": "code", - "execution_count": null, "metadata": { "id": "HqBQwLAsme9b" }, - "outputs": [], "source": [ "\"\"\"\n", "You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.\n", @@ -39,7 +53,9 @@ "that you want to use the \"Run All Cells\" (or similar) option.\n", "\"\"\"\n", "# exit()" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -47,10 +63,10 @@ "id": "jW8pMLX4EKb0" }, "source": [ - "# Automatic Speech Recognition with Subword Tokenization\n", - "\n", - "In the [ASR with NeMo notebook](https://colab.research.google.com/github/NVIDIA/NeMo/blob/stable/tutorials/asr/ASR_with_NeMo.ipynb), we discuss the pipeline necessary for Automatic Speech Recognition (ASR), and then use the NeMo toolkit to construct a functioning speech recognition model.\n", - "\n", + "# Automatic Speech Recognition with Subword Tokenization\r\n", + "\r\n", + "In the [ASR with NeMo notebook](https://colab.research.google.com/github/NVIDIA/NeMo/blob/stable/tutorials/asr/ASR_with_NeMo.ipynb), we discuss the pipeline necessary for Automatic Speech Recognition (ASR), and then use the NeMo toolkit to construct a functioning speech recognition model.\r\n", + "\r\n", "In this notebook, we take a step further and look into subword tokenization as a useful encoding scheme for ASR models, and why they are necessary. We then construct a custom tokenizer from the dataset, and use it to construct and train an ASR model on the [AN4 dataset from CMU](http://www.speech.cs.cmu.edu/databases/an4/) (with processing using `sox`)." 
] }, @@ -60,16 +76,16 @@ "id": "w2pDg6jJLLVM" }, "source": [ - "## Subword Tokenization\n", - "\n", - "We begin with a short intro to what exactly is subword tokenization. If you are familiar with some Natural Language Processing terminologies, then you might have heard of the term \"subword\" frequently.\n", - "\n", - "So what is a subword in the first place? Simply put, it is either a single character or a group of characters. When combined according to a tokenization-detokenization algorithm, it generates a set of characters, words, or entire sentences. \n", - "\n", - "Many subword tokenization-detokenization algorithms exist, which can be built using large corpora of text data to tokenize and detokenize the data to and from subwords effectively. Some of the most commonly used subword tokenization methods are [Byte Pair Encoding](https://arxiv.org/abs/1508.07909), [Word Piece Encoding](https://static.googleusercontent.com/media/research.google.com/ja//pubs/archive/37842.pdf) and [Sentence Piece Encoding](https://www.aclweb.org/anthology/D18-2012/), to name just a few.\n", - "\n", - "------\n", - "\n", + "## Subword Tokenization\r\n", + "\r\n", + "We begin with a short intro to what exactly is subword tokenization. If you are familiar with some Natural Language Processing terminologies, then you might have heard of the term \"subword\" frequently.\r\n", + "\r\n", + "So what is a subword in the first place? Simply put, it is either a single character or a group of characters. When combined according to a tokenization-detokenization algorithm, it generates a set of characters, words, or entire sentences. \r\n", + "\r\n", + "Many subword tokenization-detokenization algorithms exist, which can be built using large corpora of text data to tokenize and detokenize the data to and from subwords effectively. Some of the most commonly used subword tokenization methods are [Byte Pair Encoding](https://arxiv.org/abs/1508.07909), [Word Piece Encoding](https://static.googleusercontent.com/media/research.google.com/ja//pubs/archive/37842.pdf) and [Sentence Piece Encoding](https://www.aclweb.org/anthology/D18-2012/), to name just a few.\r\n", + "\r\n", + "------\r\n", + "\r\n", "Here, we will show a short demo on why subword tokenization is necessary for Automatic Speech Recognition under certain situations and its benefits to the model in terms of efficiency and accuracy." 
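To make the idea behind algorithms like BPE concrete before the demos below, here is a toy sketch of the core merge loop: repeatedly find the most frequent adjacent token pair in a corpus and fuse it into a new subword. This is purely illustrative and is not the `sentencepiece` implementation; the tiny corpus is made up for the example.

```python
from collections import Counter

def most_frequent_pair(words):
    # words: list of token sequences, e.g. [["h", "e", "l", "l", "o"], ...]
    pairs = Counter()
    for w in words:
        for a, b in zip(w, w[1:]):
            pairs[(a, b)] += 1
    return pairs.most_common(1)[0][0]

def merge_pair(words, pair):
    # Fuse every occurrence of `pair` into a single new subword token.
    merged = []
    for w in words:
        out, i = [], 0
        while i < len(w):
            if i + 1 < len(w) and (w[i], w[i + 1]) == pair:
                out.append(w[i] + w[i + 1])
                i += 2
            else:
                out.append(w[i])
                i += 1
        merged.append(out)
    return merged

words = [list("hello"), list("hell"), list("help")]
for _ in range(3):
    pair = most_frequent_pair(words)
    words = merge_pair(words, pair)
    print(pair, "->", words)
```

After a few merges, frequent fragments such as `hel` become single tokens, which is exactly how subword vocabularies shorten tokenized sequences.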
] }, @@ -84,17 +100,17 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "M_MQ7NLlBbup" }, - "outputs": [], "source": [ - "TEXT_CORPUS = [\n", - " \"hello world\",\n", - " \"today is a good day\",\n", + "TEXT_CORPUS = [\r\n", + " \"hello world\",\r\n", + " \"today is a good day\",\r\n", "]" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -107,23 +123,23 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "3tusMof9QMs7" }, - "outputs": [], "source": [ - "def char_tokenize(text):\n", - " tokens = []\n", - " for char in text:\n", - " tokens.append(ord(char))\n", - " return tokens\n", - "\n", - "def char_detokenize(tokens):\n", - " tokens = [chr(t) for t in tokens]\n", - " text = \"\".join(tokens)\n", + "def char_tokenize(text):\r\n", + " tokens = []\r\n", + " for char in text:\r\n", + " tokens.append(ord(char))\r\n", + " return tokens\r\n", + "\r\n", + "def char_detokenize(tokens):\r\n", + " tokens = [chr(t) for t in tokens]\r\n", + " text = \"\".join(tokens)\r\n", " return text" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -136,17 +152,17 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "2stpuRsNQpMJ" }, - "outputs": [], "source": [ - "char_tokens = char_tokenize(TEXT_CORPUS[0])\n", - "print(\"Tokenized tokens :\", char_tokens)\n", - "text = char_detokenize(char_tokens)\n", + "char_tokens = char_tokenize(TEXT_CORPUS[0])\r\n", + "print(\"Tokenized tokens :\", char_tokens)\r\n", + "text = char_detokenize(char_tokens)\r\n", "print(\"Detokenized text :\", text)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -154,48 +170,48 @@ "id": "gY6G6Ow1RSf4" }, "source": [ - "-----\n", - "Great! The character tokenizer did its job correctly - each character is separated as an individual token, and they can be reconstructed into precisely the original text!\n", - "\n", + "-----\r\n", + "Great! The character tokenizer did its job correctly - each character is separated as an individual token, and they can be reconstructed into precisely the original text!\r\n", + "\r\n", "Now let's create a simple dictionary-based tokenizer - it will have a select set of subwords that it will use to map tokens back and forth. Note - to simplify the technique's demonstration; we will use a vocabulary with entire words. However, note that this is an uncommon occurrence unless the vocabulary sizes are huge when built on natural text." 
] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "Mhn2MxODRNTv" }, - "outputs": [], "source": [ - "def dict_tokenize(text, vocabulary):\n", - " tokens = []\n", - "\n", - " # first do full word searches\n", - " split_text = text.split()\n", - " for split in split_text:\n", - " if split in vocabulary:\n", - " tokens.append(vocabulary[split])\n", - " else:\n", - " chars = list(split)\n", - " t_chars = [vocabulary[c] for c in chars]\n", - " tokens.extend(t_chars)\n", - " tokens.append(vocabulary[\" \"])\n", - "\n", - " # remove extra space token\n", - " tokens.pop(-1)\n", - " return tokens\n", - "\n", - "def dict_detokenize(tokens, vocabulary):\n", - " text = \"\"\n", - " reverse_vocab = {v: k for k, v in vocabulary.items()}\n", - " for token in tokens:\n", - " if token in reverse_vocab:\n", - " text = text + reverse_vocab[token]\n", - " else:\n", - " text = text + \"\".join(token)\n", + "def dict_tokenize(text, vocabulary):\r\n", + " tokens = []\r\n", + "\r\n", + " # first do full word searches\r\n", + " split_text = text.split()\r\n", + " for split in split_text:\r\n", + " if split in vocabulary:\r\n", + " tokens.append(vocabulary[split])\r\n", + " else:\r\n", + " chars = list(split)\r\n", + " t_chars = [vocabulary[c] for c in chars]\r\n", + " tokens.extend(t_chars)\r\n", + " tokens.append(vocabulary[\" \"])\r\n", + "\r\n", + " # remove extra space token\r\n", + " tokens.pop(-1)\r\n", + " return tokens\r\n", + "\r\n", + "def dict_detokenize(tokens, vocabulary):\r\n", + " text = \"\"\r\n", + " reverse_vocab = {v: k for k, v in vocabulary.items()}\r\n", + " for token in tokens:\r\n", + " if token in reverse_vocab:\r\n", + " text = text + reverse_vocab[token]\r\n", + " else:\r\n", + " text = text + \"\".join(token)\r\n", " return text" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -208,34 +224,34 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "rone69s8Ui3q" }, - "outputs": [], "source": [ - "vocabulary = {chr(i + ord(\"a\")) : (i + 1) for i in range(26)}\n", - "# add whole words and special tokens\n", - "vocabulary[\" \"] = 0\n", - "vocabulary[\"hello\"] = len(vocabulary) + 1\n", - "vocabulary[\"today\"] = len(vocabulary) + 1\n", - "vocabulary[\"good\"] = len(vocabulary) + 1\n", + "vocabulary = {chr(i + ord(\"a\")) : (i + 1) for i in range(26)}\r\n", + "# add whole words and special tokens\r\n", + "vocabulary[\" \"] = 0\r\n", + "vocabulary[\"hello\"] = len(vocabulary) + 1\r\n", + "vocabulary[\"today\"] = len(vocabulary) + 1\r\n", + "vocabulary[\"good\"] = len(vocabulary) + 1\r\n", "print(vocabulary)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "sGLGaLtXUgrN" }, - "outputs": [], "source": [ - "dict_tokens = dict_tokenize(TEXT_CORPUS[0], vocabulary)\n", - "print(\"Tokenized tokens :\", dict_tokens)\n", - "text = dict_detokenize(dict_tokens, vocabulary)\n", + "dict_tokens = dict_tokenize(TEXT_CORPUS[0], vocabulary)\r\n", + "print(\"Tokenized tokens :\", dict_tokens)\r\n", + "text = dict_detokenize(dict_tokens, vocabulary)\r\n", "print(\"Detokenized text :\", text)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -243,25 +259,25 @@ "id": "rUETSbM-XYUl" }, "source": [ - "------\n", - "Great! 
Our dictionary tokenizer works well and tokenizes-detokenizes the data correctly.\n", - "\n", - "You might be wondering - why did we have to go through all this trouble to tokenize and detokenize data if we get back the same thing?\n", - "\n", + "------\r\n", + "Great! Our dictionary tokenizer works well and tokenizes-detokenizes the data correctly.\r\n", + "\r\n", + "You might be wondering - why did we have to go through all this trouble to tokenize and detokenize data if we get back the same thing?\r\n", + "\r\n", "For ASR - the hidden benefit lies in the length of the tokenized representation!" ] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "eZFGuLqUVhLW" }, - "outputs": [], "source": [ - "print(\"Character tokenization length -\", len(char_tokens))\n", + "print(\"Character tokenization length -\", len(char_tokens))\r\n", "print(\"Dict tokenization length -\", len(dict_tokens))" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -269,8 +285,8 @@ "id": "vw6jJD8eYJpK" }, "source": [ - "By having the whole word \"hello\" in our tokenizer's dictionary, we could reduce the length of the tokenized data by four tokens and still represent the same information!\n", - "\n", + "By having the whole word \"hello\" in our tokenizer's dictionary, we could reduce the length of the tokenized data by four tokens and still represent the same information!\r\n", + "\r\n", "Actual subword algorithms like the ones discussed above go several steps further - they partition whole words based on occurrence in text and build tokens for them too! So instead of wasting 5 tokens for `[\"h\", \"e\", \"l\", \"l\", \"o\"]`, we can represent it as `[\"hel##\", \"##lo\"]` and then merge the `##` tokens together to get back `hello` by using just 2 tokens !" ] }, @@ -280,25 +296,25 @@ "id": "hcCbVA3GY-TZ" }, "source": [ - "## The necessity of subword tokenization\n", - "\n", - "It has been found via extensive research in the domain of Neural Machine Translation and Language Modelling (and its variants), that subword tokenization not only reduces the length of the tokenized representation (thereby making sentences shorter and more manageable for models to learn), but also boosts the accuracy of prediction of correct tokens (refer to the earlier cited papers).\n", - "\n", - "You might remember that earlier; we mentioned subword tokenization as a necessity rather than just a nice-to-have component for ASR. In the previous tutorial, we used the [Connectionist Temporal Classification](https://www.cs.toronto.edu/~graves/icml_2006.pdf) loss function to train the model, but this loss function has a few limitations- \n", - "\n", - " - **Generated tokens are conditionally independent of each other**. In other words - the probability of character \"l\" being predicted after \"hel##\" is conditionally independent of the previous token - so any other token can also be predicted unless the model has future information!\n", - " - **The length of the generated (target) sequence must be shorter than that of the source sequence.** \n", - "\n", - "------\n", - "\n", - "It turns out - subword tokenization helps alleviate both of these issues!\n", - "\n", - " - Sophisticated subword tokenization algorithms build their vocabularies based on large text corpora. To accurately tokenize such large volumes of text with minimal vocabulary size, the subwords that are learned inherently model the interdependency between tokens of that language to some degree. 
\n", - " \n", - "Looking at the previous example, the token `hel##` is a single token that represents the relationship `h` => `e` => `l`. When the model predicts the singe token `hel##`, it implicitly predicts this relationship - even though the subsequent token can be either `l` (for `hell`) or `##lo` (for `hello`) and is predicted independently of the previous token!\n", - "\n", - " - By reducing the target sentence length by subword tokenization (target sentence here being the characters/subwords transcribed from the audio signal), we entirely sidestep the sequence length limitation of CTC loss!\n", - "\n", + "## The necessity of subword tokenization\r\n", + "\r\n", + "It has been found via extensive research in the domain of Neural Machine Translation and Language Modelling (and its variants), that subword tokenization not only reduces the length of the tokenized representation (thereby making sentences shorter and more manageable for models to learn), but also boosts the accuracy of prediction of correct tokens (refer to the earlier cited papers).\r\n", + "\r\n", + "You might remember that earlier; we mentioned subword tokenization as a necessity rather than just a nice-to-have component for ASR. In the previous tutorial, we used the [Connectionist Temporal Classification](https://www.cs.toronto.edu/~graves/icml_2006.pdf) loss function to train the model, but this loss function has a few limitations- \r\n", + "\r\n", + " - **Generated tokens are conditionally independent of each other**. In other words - the probability of character \"l\" being predicted after \"hel##\" is conditionally independent of the previous token - so any other token can also be predicted unless the model has future information!\r\n", + " - **The length of the generated (target) sequence must be shorter than that of the source sequence.** \r\n", + "\r\n", + "------\r\n", + "\r\n", + "It turns out - subword tokenization helps alleviate both of these issues!\r\n", + "\r\n", + " - Sophisticated subword tokenization algorithms build their vocabularies based on large text corpora. To accurately tokenize such large volumes of text with minimal vocabulary size, the subwords that are learned inherently model the interdependency between tokens of that language to some degree. \r\n", + " \r\n", + "Looking at the previous example, the token `hel##` is a single token that represents the relationship `h` => `e` => `l`. When the model predicts the singe token `hel##`, it implicitly predicts this relationship - even though the subsequent token can be either `l` (for `hell`) or `##lo` (for `hello`) and is predicted independently of the previous token!\r\n", + "\r\n", + " - By reducing the target sentence length by subword tokenization (target sentence here being the characters/subwords transcribed from the audio signal), we entirely sidestep the sequence length limitation of CTC loss!\r\n", + "\r\n", "This means we can perform a larger number of pooling steps in our acoustic models, thereby improving execution speed while simultaneously reducing memory requirements." ] }, @@ -308,8 +324,8 @@ "id": "KAFSGJRAeTe6" }, "source": [ - "# Building a custom subword tokenizer\n", - "\n", + "# Building a custom subword tokenizer\r\n", + "\r\n", "After all that talk about subword tokenization, let's finally build a custom tokenizer for our ASR model! While the `AN4` dataset is simple enough to be trained using character-based models, its small size is also perfect for a demonstration on a notebook." 
] }, @@ -319,64 +335,64 @@ "id": "Ire6cSmEe2GU" }, "source": [ - "## Preparing the dataset (AN4)\n", - "\n", - "The AN4 dataset, also known as the Alphanumeric dataset, was collected and published by Carnegie Mellon University. It consists of recordings of people spelling out addresses, names, telephone numbers, etc., one letter or number at a time, and their corresponding transcripts. We choose to use AN4 for this tutorial because it is relatively small, with 948 training and 130 test utterances, and so it trains quickly.\n", - "\n", + "## Preparing the dataset (AN4)\r\n", + "\r\n", + "The AN4 dataset, also known as the Alphanumeric dataset, was collected and published by Carnegie Mellon University. It consists of recordings of people spelling out addresses, names, telephone numbers, etc., one letter or number at a time, and their corresponding transcripts. We choose to use AN4 for this tutorial because it is relatively small, with 948 training and 130 test utterances, and so it trains quickly.\r\n", + "\r\n", "Before we get started, let's download and prepare the dataset. The utterances are available as `.sph` files, so we will need to convert them to `.wav` for later processing. If you are not using Google Colab, please make sure you have [Sox](http://sox.sourceforge.net/) installed for this step--see the \"Downloads\" section of the linked Sox homepage. (If you are using Google Colab, Sox should have already been installed in the setup cell at the beginning.)" ] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "dLB_KedzYHCw" }, - "outputs": [], "source": [ "# This is where the an4/ directory will be placed.\n", "# Change this if you don't want the data to be extracted in the current directory.\n", "# The directory should exist.\n", "data_dir = \".\"" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "AsHdRslhe-7W" }, - "outputs": [], "source": [ - "import glob\n", - "import os\n", - "import subprocess\n", - "import tarfile\n", - "import wget\n", - "\n", - "# Download the dataset. This will take a few moments...\n", - "print(\"******\")\n", - "if not os.path.exists(data_dir + '/an4_sphere.tar.gz'):\n", - " an4_url = 'https://dldata-public.s3.us-east-2.amazonaws.com/an4_sphere.tar.gz' # for the original source, please visit http://www.speech.cs.cmu.edu/databases/an4/an4_sphere.tar.gz \n", - " an4_path = wget.download(an4_url, data_dir)\n", - " print(f\"Dataset downloaded at: {an4_path}\")\n", - "else:\n", - " print(\"Tarfile already exists.\")\n", - " an4_path = data_dir + '/an4_sphere.tar.gz'\n", - "\n", - "if not os.path.exists(data_dir + '/an4/'):\n", - " # Untar and convert .sph to .wav (using sox)\n", - " tar = tarfile.open(an4_path)\n", - " tar.extractall(path=data_dir)\n", - "\n", - " print(\"Converting .sph to .wav...\")\n", - " sph_list = glob.glob(data_dir + '/an4/**/*.sph', recursive=True)\n", - " for sph_path in sph_list:\n", - " wav_path = sph_path[:-4] + '.wav'\n", - " cmd = [\"sox\", sph_path, wav_path]\n", - " subprocess.run(cmd)\n", + "import glob\r\n", + "import os\r\n", + "import subprocess\r\n", + "import tarfile\r\n", + "import wget\r\n", + "\r\n", + "# Download the dataset. 
This will take a few moments...\r\n", + "print(\"******\")\r\n", + "if not os.path.exists(data_dir + '/an4_sphere.tar.gz'):\r\n", + " an4_url = 'https://dldata-public.s3.us-east-2.amazonaws.com/an4_sphere.tar.gz' # for the original source, please visit http://www.speech.cs.cmu.edu/databases/an4/an4_sphere.tar.gz \r\n", + " an4_path = wget.download(an4_url, data_dir)\r\n", + " print(f\"Dataset downloaded at: {an4_path}\")\r\n", + "else:\r\n", + " print(\"Tarfile already exists.\")\r\n", + " an4_path = data_dir + '/an4_sphere.tar.gz'\r\n", + "\r\n", + "if not os.path.exists(data_dir + '/an4/'):\r\n", + " # Untar and convert .sph to .wav (using sox)\r\n", + " tar = tarfile.open(an4_path)\r\n", + " tar.extractall(path=data_dir)\r\n", + "\r\n", + " print(\"Converting .sph to .wav...\")\r\n", + " sph_list = glob.glob(data_dir + '/an4/**/*.sph', recursive=True)\r\n", + " for sph_path in sph_list:\r\n", + " wav_path = sph_path[:-4] + '.wav'\r\n", + " cmd = [\"sox\", sph_path, wav_path]\r\n", + " subprocess.run(cmd)\r\n", "print(\"Finished conversion.\\n******\")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -384,7 +400,7 @@ "id": "6kOuy-OWfUWn" }, "source": [ - "You should now have a folder called `an4` that contains `etc/an4_train.transcription`, `etc/an4_test.transcription`, audio files in `wav/an4_clstk` and `wav/an4test_clstk`, along with some other files we will not need.\n" + "You should now have a folder called `an4` that contains `etc/an4_train.transcription`, `etc/an4_test.transcription`, audio files in `wav/an4_clstk` and `wav/an4test_clstk`, along with some other files we will not need.\r\n" ] }, { @@ -393,79 +409,79 @@ "id": "S2S--I3kftF0" }, "source": [ - "## Creating Data Manifests\n", - "\n", - "The first thing we need to do now is to create manifests for our training and evaluation data, which will contain the metadata of our audio files. NeMo data sets take in a standardized manifest format where each line corresponds to one sample of audio, such that the number of lines in a manifest is equal to the number of samples that are represented by that manifest. A line must contain the path to an audio file, the corresponding transcript (or path to a transcript file), and the duration of the audio sample.\n", - "\n", - "Here's an example of what one line in a NeMo-compatible manifest might look like:\n", - "```\n", - "{\"audio_filepath\": \"path/to/audio.wav\", \"duration\": 3.45, \"text\": \"this is a nemo tutorial\"}\n", - "```\n", - "\n", - "We can build our training and evaluation manifests using `an4/etc/an4_train.transcription` and `an4/etc/an4_test.transcription`, which have lines containing transcripts and their corresponding audio file IDs:\n", - "```\n", - "...\n", - " P I T T S B U R G H (cen5-fash-b)\n", - " TWO SIX EIGHT FOUR FOUR ONE EIGHT (cen7-fash-b)\n", - "...\n", + "## Creating Data Manifests\r\n", + "\r\n", + "The first thing we need to do now is to create manifests for our training and evaluation data, which will contain the metadata of our audio files. NeMo data sets take in a standardized manifest format where each line corresponds to one sample of audio, such that the number of lines in a manifest is equal to the number of samples that are represented by that manifest. 
A line must contain the path to an audio file, the corresponding transcript (or path to a transcript file), and the duration of the audio sample.\r\n", + "\r\n", + "Here's an example of what one line in a NeMo-compatible manifest might look like:\r\n", + "```\r\n", + "{\"audio_filepath\": \"path/to/audio.wav\", \"duration\": 3.45, \"text\": \"this is a nemo tutorial\"}\r\n", + "```\r\n", + "\r\n", + "We can build our training and evaluation manifests using `an4/etc/an4_train.transcription` and `an4/etc/an4_test.transcription`, which have lines containing transcripts and their corresponding audio file IDs:\r\n", + "```\r\n", + "...\r\n", + " P I T T S B U R G H (cen5-fash-b)\r\n", + " TWO SIX EIGHT FOUR FOUR ONE EIGHT (cen7-fash-b)\r\n", + "...\r\n", "```" ] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "sFyGsk80fRp7" }, - "outputs": [], "source": [ - "# --- Building Manifest Files --- #\n", - "import json\n", - "import librosa\n", - "\n", - "# Function to build a manifest\n", - "def build_manifest(transcripts_path, manifest_path, wav_path):\n", - " with open(transcripts_path, 'r') as fin:\n", - " with open(manifest_path, 'w') as fout:\n", - " for line in fin:\n", - " # Lines look like this:\n", - " # transcript (fileID)\n", - " transcript = line[: line.find('(')-1].lower()\n", - " transcript = transcript.replace('', '').replace('', '')\n", - " transcript = transcript.strip()\n", - "\n", - " file_id = line[line.find('(')+1 : -2] # e.g. \"cen4-fash-b\"\n", - " audio_path = os.path.join(\n", - " data_dir, wav_path,\n", - " file_id[file_id.find('-')+1 : file_id.rfind('-')],\n", - " file_id + '.wav')\n", - "\n", - " duration = librosa.core.get_duration(filename=audio_path)\n", - "\n", - " # Write the metadata to the manifest\n", - " metadata = {\n", - " \"audio_filepath\": audio_path,\n", - " \"duration\": duration,\n", - " \"text\": transcript\n", - " }\n", - " json.dump(metadata, fout)\n", - " fout.write('\\n')\n", - " \n", - "# Building Manifests\n", - "print(\"******\")\n", - "train_transcripts = data_dir + '/an4/etc/an4_train.transcription'\n", - "train_manifest = data_dir + '/an4/train_manifest.json'\n", - "if not os.path.isfile(train_manifest):\n", - " build_manifest(train_transcripts, train_manifest, 'an4/wav/an4_clstk')\n", - " print(\"Training manifest created.\")\n", - "\n", - "test_transcripts = data_dir + '/an4/etc/an4_test.transcription'\n", - "test_manifest = data_dir + '/an4/test_manifest.json'\n", - "if not os.path.isfile(test_manifest):\n", - " build_manifest(test_transcripts, test_manifest, 'an4/wav/an4test_clstk')\n", - " print(\"Test manifest created.\")\n", + "# --- Building Manifest Files --- #\r\n", + "import json\r\n", + "import librosa\r\n", + "\r\n", + "# Function to build a manifest\r\n", + "def build_manifest(transcripts_path, manifest_path, wav_path):\r\n", + " with open(transcripts_path, 'r') as fin:\r\n", + " with open(manifest_path, 'w') as fout:\r\n", + " for line in fin:\r\n", + " # Lines look like this:\r\n", + " # transcript (fileID)\r\n", + " transcript = line[: line.find('(')-1].lower()\r\n", + " transcript = transcript.replace('', '').replace('', '')\r\n", + " transcript = transcript.strip()\r\n", + "\r\n", + " file_id = line[line.find('(')+1 : -2] # e.g. 
\"cen4-fash-b\"\r\n", + " audio_path = os.path.join(\r\n", + " data_dir, wav_path,\r\n", + " file_id[file_id.find('-')+1 : file_id.rfind('-')],\r\n", + " file_id + '.wav')\r\n", + "\r\n", + " duration = librosa.core.get_duration(filename=audio_path)\r\n", + "\r\n", + " # Write the metadata to the manifest\r\n", + " metadata = {\r\n", + " \"audio_filepath\": audio_path,\r\n", + " \"duration\": duration,\r\n", + " \"text\": transcript\r\n", + " }\r\n", + " json.dump(metadata, fout)\r\n", + " fout.write('\\n')\r\n", + " \r\n", + "# Building Manifests\r\n", + "print(\"******\")\r\n", + "train_transcripts = data_dir + '/an4/etc/an4_train.transcription'\r\n", + "train_manifest = data_dir + '/an4/train_manifest.json'\r\n", + "if not os.path.isfile(train_manifest):\r\n", + " build_manifest(train_transcripts, train_manifest, 'an4/wav/an4_clstk')\r\n", + " print(\"Training manifest created.\")\r\n", + "\r\n", + "test_transcripts = data_dir + '/an4/etc/an4_test.transcription'\r\n", + "test_manifest = data_dir + '/an4/test_manifest.json'\r\n", + "if not os.path.isfile(test_manifest):\r\n", + " build_manifest(test_transcripts, test_manifest, 'an4/wav/an4test_clstk')\r\n", + " print(\"Test manifest created.\")\r\n", "print(\"***Done***\")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -478,14 +494,14 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "PSv_wZTQf50U" }, - "outputs": [], "source": [ "!head -n 5 {data_dir}/an4/train_manifest.json" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -493,25 +509,25 @@ "id": "3S80tsTHhDmU" }, "source": [ - "## Build a custom tokenizer\n", - "\n", - "Next, we will use a NeMo script to easily build a tokenizer for the above dataset. The script takes a few arguments, which will be explained in detail.\n", - "\n", + "## Build a custom tokenizer\r\n", + "\r\n", + "Next, we will use a NeMo script to easily build a tokenizer for the above dataset. The script takes a few arguments, which will be explained in detail.\r\n", + "\r\n", "First, download the tokenizer creation script from the nemo repository." ] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "ESHI2piTgJRO" }, - "outputs": [], "source": [ "if not os.path.exists(\"scripts/tokenizers/process_asr_text_tokenizer.py\"):\n", " !mkdir scripts\n", " !wget -P scripts/ https://raw.githubusercontent.com/NVIDIA/NeMo/$BRANCH/scripts/tokenizers/process_asr_text_tokenizer.py" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -519,36 +535,34 @@ "id": "BkcpeYp1iIsU" }, "source": [ - "The script above takes a few important arguments -\n", - "\n", - " - either `--manifest` or `--data_file`: If your text data lies inside of an ASR manifest file, then use the `--manifest` path. If instead the text data is inside a file with separate lines corresponding to different text lines, then use `--data_file`. In either case, you can add commas to concatenate different manifests or different data files.\n", - "\n", - " - `--data_root`: The output directory (whose subdirectories will be created if not present) where the tokenizers will be placed.\n", - "\n", - " - `--vocab_size`: The size of the tokenizer vocabulary. Larger vocabularies can accommodate almost entire words, but the decoder size of any model will grow proportionally.\n", - "\n", - " - `--tokenizer`: Can be either `spe` or `wpe` . `spe` refers to the Google `sentencepiece` library tokenizer. 
`wpe` refers to the HuggingFace BERT Word Piece tokenizer. Please refer to the papers above for the relevant technique in order to select an appropriate tokenizer.\n", - "\n", - " - `--no_lower_case`: When this flag is passed, it will force the tokenizer to create separate tokens for upper and lower case characters. By default, the script will turn all the text to lower case before tokenization (and if upper case characters are passed during training/inference, the tokenizer will emit a token equivalent to Out-Of-Vocabulary). Used primarily for the English language. \n", - "\n", - " - `--spe_type`: The `sentencepiece` library has a few implementations of the tokenization technique, and `spe_type` refers to these implementations. Currently supported types are `unigram`, `bpe`, `char`, `word`. Defaults to `bpe`.\n", - "\n", - " - `--spe_character_coverage`: The `sentencepiece` library considers how much of the original vocabulary it should cover in its \"base set\" of tokens (akin to the lower and upper case characters of the English language). For almost all languages with small base token sets `(<1000 tokens)`, this should be kept at its default of 1.0. For languages with larger vocabularies (say Japanese, Mandarin, Korean etc), the suggested value is 0.9995.\n", - "\n", - " - `--spe_sample_size`: If the dataset is too large, consider using a sampled dataset indicated by a positive integer. By default, any negative value (default = -1) will use the entire dataset.\n", - "\n", - " - `--spe_train_extremely_large_corpus`: When training a sentencepiece tokenizer on very large amounts of text, sometimes the tokenizer will run out of memory or wont be able to process so much data on RAM. At some point you might receive the following error - \"Input corpus too large, try with train_extremely_large_corpus=true\". If your machine has large amounts of RAM, it might still be possible to build the tokenizer using the above flag. Will silently fail if it runs out of RAM.\n", - "\n", + "The script above takes a few important arguments -\r\n", + "\r\n", + " - either `--manifest` or `--data_file`: If your text data lies inside of an ASR manifest file, then use the `--manifest` path. If instead the text data is inside a file with separate lines corresponding to different text lines, then use `--data_file`. In either case, you can add commas to concatenate different manifests or different data files.\r\n", + "\r\n", + " - `--data_root`: The output directory (whose subdirectories will be created if not present) where the tokenizers will be placed.\r\n", + "\r\n", + " - `--vocab_size`: The size of the tokenizer vocabulary. Larger vocabularies can accommodate almost entire words, but the decoder size of any model will grow proportionally.\r\n", + "\r\n", + " - `--tokenizer`: Can be either `spe` or `wpe` . `spe` refers to the Google `sentencepiece` library tokenizer. `wpe` refers to the HuggingFace BERT Word Piece tokenizer. Please refer to the papers above for the relevant technique in order to select an appropriate tokenizer.\r\n", + "\r\n", + " - `--no_lower_case`: When this flag is passed, it will force the tokenizer to create separate tokens for upper and lower case characters. By default, the script will turn all the text to lower case before tokenization (and if upper case characters are passed during training/inference, the tokenizer will emit a token equivalent to Out-Of-Vocabulary). Used primarily for the English language. 
\r\n", + "\r\n", + " - `--spe_type`: The `sentencepiece` library has a few implementations of the tokenization technique, and `spe_type` refers to these implementations. Currently supported types are `unigram`, `bpe`, `char`, `word`. Defaults to `bpe`.\r\n", + "\r\n", + " - `--spe_character_coverage`: The `sentencepiece` library considers how much of the original vocabulary it should cover in its \"base set\" of tokens (akin to the lower and upper case characters of the English language). For almost all languages with small base token sets `(<1000 tokens)`, this should be kept at its default of 1.0. For languages with larger vocabularies (say Japanese, Mandarin, Korean etc), the suggested value is 0.9995.\r\n", + "\r\n", + " - `--spe_sample_size`: If the dataset is too large, consider using a sampled dataset indicated by a positive integer. By default, any negative value (default = -1) will use the entire dataset.\r\n", + "\r\n", + " - `--spe_train_extremely_large_corpus`: When training a sentencepiece tokenizer on very large amounts of text, sometimes the tokenizer will run out of memory or wont be able to process so much data on RAM. At some point you might receive the following error - \"Input corpus too large, try with train_extremely_large_corpus=true\". If your machine has large amounts of RAM, it might still be possible to build the tokenizer using the above flag. Will silently fail if it runs out of RAM.\r\n", + "\r\n", " - `--log`: Whether the script should display log messages" ] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "mAw4WMqbh6ii" }, - "outputs": [], "source": [ "!python ./scripts/process_asr_text_tokenizer.py \\\n", " --manifest=\"{data_dir}/an4/train_manifest.json\" \\\n", @@ -558,7 +572,9 @@ " --no_lower_case \\\n", " --spe_type=\"unigram\" \\\n", " --log" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -566,23 +582,23 @@ "id": "gaIFIKgol-p2" }, "source": [ - "-----\n", - "\n", - "That's it! Our tokenizer is now built and stored inside the `data_root` directory that we provided to the script.\n", - "\n", + "-----\r\n", + "\r\n", + "That's it! Our tokenizer is now built and stored inside the `data_root` directory that we provided to the script.\r\n", + "\r\n", "First we start by inspecting the tokenizer vocabulary itself. To keep it manageable, we will print just the first 10 tokens of the vocabulary:" ] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "0A9fSpr4l58u" }, - "outputs": [], "source": [ "!head -n 10 {data_dir}/tokenizers/an4/tokenizer_spe_unigram_v32/vocab.txt" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -590,31 +606,31 @@ "id": "kPuyTHGTm8Q-" }, "source": [ - "# Training an ASR Model with subword tokenization\n", - "\n", - "Now that our tokenizer is built, let's begin constructing an ASR model that will use this tokenizer for its dataset pre-processing and post-processing steps.\n", - "\n", - "We will use a Citrinet model to demonstrate the usage of subword tokenization models for training and inference. 
Citrinet is a [QuartzNet-like architecture](https://arxiv.org/abs/1910.10261), but it uses subword-tokenization along with 8x subsampling and [Squeeze-and-Excitation](https://arxiv.org/abs/1709.01507) to achieve strong accuracy in transcriptions while still using non-autoregressive decoding for efficient inference.\n", - "\n", - "We'll be using the **Neural Modules (NeMo) toolkit** for this part, so if you haven't already, you should download and install NeMo and its dependencies. To do so, just follow the directions on the [GitHub page](https://github.com/NVIDIA/NeMo), or in the [documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/).\n", - "\n", + "# Training an ASR Model with subword tokenization\r\n", + "\r\n", + "Now that our tokenizer is built, let's begin constructing an ASR model that will use this tokenizer for its dataset pre-processing and post-processing steps.\r\n", + "\r\n", + "We will use a Citrinet model to demonstrate the usage of subword tokenization models for training and inference. Citrinet is a [QuartzNet-like architecture](https://arxiv.org/abs/1910.10261), but it uses subword-tokenization along with 8x subsampling and [Squeeze-and-Excitation](https://arxiv.org/abs/1709.01507) to achieve strong accuracy in transcriptions while still using non-autoregressive decoding for efficient inference.\r\n", + "\r\n", + "We'll be using the **Neural Modules (NeMo) toolkit** for this part, so if you haven't already, you should download and install NeMo and its dependencies. To do so, just follow the directions on the [GitHub page](https://github.com/NVIDIA/NeMo), or in the [documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/).\r\n", + "\r\n", "NeMo let us easily hook together the components (modules) of our model, such as the data layer, intermediate layers, and various losses, without worrying too much about implementation details of individual parts or connections between modules. NeMo also comes with complete models which only require your data and hyperparameters for training." ] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "jALgpGLjmaCw" }, - "outputs": [], "source": [ - "# NeMo's \"core\" package\n", - "import nemo\n", - "# NeMo's ASR collection - this collections contains complete ASR models and\n", - "# building blocks (modules) for ASR\n", + "# NeMo's \"core\" package\r\n", + "import nemo\r\n", + "# NeMo's ASR collection - this collections contains complete ASR models and\r\n", + "# building blocks (modules) for ASR\r\n", "import nemo.collections.asr as nemo_asr" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -622,8 +638,8 @@ "id": "msxCiR8epEZu" }, "source": [ - "## Training from scratch\n", - "\n", + "## Training from scratch\r\n", + "\r\n", "To train from scratch, you need to prepare your training data in the right format and specify your models architecture." ] }, @@ -633,51 +649,51 @@ "id": "PasvgSEwpWXd" }, "source": [ - "### Specifying Our Model with a YAML Config File\n", - "\n", - "We'll build a *Citrinet* model for this tutorial and use *greedy CTC decoder*, using the configuration found in `./configs/citrinet_bpe.yaml`.\n", - "\n", - "If we open up this config file, we find model section which describes architecture of our model. A model contains an entry labeled `encoder`, with a field called `jasper` that contains a list with multiple entries. 
Each of the members in this list specifies one block in our model, and looks something like this:\n", - "```\n", - "- filters: 192\n", - " repeat: 5\n", - " kernel: [11]\n", - " stride: [1]\n", - " dilation: [1]\n", - " dropout: 0.0\n", - " residual: false\n", - " separable: true\n", - " se: true\n", - " se_context_size: -1\n", - "```\n", - "The first member of the list corresponds to the first block in the QuartzNet/Citrinet architecture diagram. \n", - "\n", - "Some entries at the top of the file specify how we will handle training (`train_ds`) and validation (`validation_ds`) data.\n", - "\n", + "### Specifying Our Model with a YAML Config File\r\n", + "\r\n", + "We'll build a *Citrinet* model for this tutorial and use *greedy CTC decoder*, using the configuration found in `./configs/citrinet_bpe.yaml`.\r\n", + "\r\n", + "If we open up this config file, we find model section which describes architecture of our model. A model contains an entry labeled `encoder`, with a field called `jasper` that contains a list with multiple entries. Each of the members in this list specifies one block in our model, and looks something like this:\r\n", + "```\r\n", + "- filters: 192\r\n", + " repeat: 5\r\n", + " kernel: [11]\r\n", + " stride: [1]\r\n", + " dilation: [1]\r\n", + " dropout: 0.0\r\n", + " residual: false\r\n", + " separable: true\r\n", + " se: true\r\n", + " se_context_size: -1\r\n", + "```\r\n", + "The first member of the list corresponds to the first block in the QuartzNet/Citrinet architecture diagram. \r\n", + "\r\n", + "Some entries at the top of the file specify how we will handle training (`train_ds`) and validation (`validation_ds`) data.\r\n", + "\r\n", "Using a YAML config such as this helps get a quick and human-readable overview of what your architecture looks like, and allows you to swap out model and run configurations easily without needing to change your code." ] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "XLUDyWOmo8xZ" }, - "outputs": [], "source": [ "from omegaconf import OmegaConf, open_dict" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "p1O8JRk1qXX9" }, - "outputs": [], "source": [ "params = OmegaConf.load(\"./configs/config_bpe.yaml\")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -690,14 +706,14 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "raXzemtIqjL-" }, - "outputs": [], "source": [ "print(OmegaConf.to_yaml(params))" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -705,29 +721,29 @@ "id": "Nw-8epOcuCcG" }, "source": [ - "## Specifying the tokenizer to the model\n", - "\n", - "Now that we have a model config, we are almost ready to train it ! We just have to inform it where the tokenizer directory exists and it will do the rest for us !\n", - "\n", - "We have to provide just two pieces of information via the config:\n", - "\n", - " - `tokenizer.dir`: The directory where the tokenizer files are stored\n", - " - `tokenizer.type`: Can be `bpe` (for `sentencepiece` based tokenizers) or `wpe` (for HuggingFace based BERT Word Piece Tokenizers. Represents what type of tokenizer is being supplied and parse its directory to construct the actual tokenizer.\n", - "\n", - "**Note**: We only have to provide the **directory** where the tokenizer file exists along with its vocabulary and any other essential components. 
We pass the directory instead of an explicit vocabulary path, since not all libraries construct their tokenizer in the same manner, so the model will figure out how it should prepare the tokenizer.\n" + "## Specifying the tokenizer to the model\r\n", + "\r\n", + "Now that we have a model config, we are almost ready to train it ! We just have to inform it where the tokenizer directory exists and it will do the rest for us !\r\n", + "\r\n", + "We have to provide just two pieces of information via the config:\r\n", + "\r\n", + " - `tokenizer.dir`: The directory where the tokenizer files are stored\r\n", + " - `tokenizer.type`: Can be `bpe` (for `sentencepiece` based tokenizers) or `wpe` (for HuggingFace based BERT Word Piece Tokenizers. Represents what type of tokenizer is being supplied and parse its directory to construct the actual tokenizer.\r\n", + "\r\n", + "**Note**: We only have to provide the **directory** where the tokenizer file exists along with its vocabulary and any other essential components. We pass the directory instead of an explicit vocabulary path, since not all libraries construct their tokenizer in the same manner, so the model will figure out how it should prepare the tokenizer.\r\n" ] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "YME-v0rcudUz" }, - "outputs": [], "source": [ - "params.model.tokenizer.dir = data_dir + \"/tokenizers/an4/tokenizer_spe_unigram_v32/\" # note this is a directory, not a path to a vocabulary file\n", + "params.model.tokenizer.dir = data_dir + \"/tokenizers/an4/tokenizer_spe_unigram_v32/\" # note this is a directory, not a path to a vocabulary file\r\n", "params.model.tokenizer.type = \"bpe\"" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -735,24 +751,24 @@ "id": "ceelkfIHrHTR" }, "source": [ - "### Training with PyTorch Lightning\n", - "\n", - "NeMo models and modules can be used in any PyTorch code where torch.nn.Module is expected.\n", - "\n", + "### Training with PyTorch Lightning\r\n", + "\r\n", + "NeMo models and modules can be used in any PyTorch code where torch.nn.Module is expected.\r\n", + "\r\n", "However, NeMo's models are based on [PytorchLightning's](https://github.com/PyTorchLightning/pytorch-lightning) LightningModule and we recommend you use PytorchLightning for training and fine-tuning as it makes using mixed precision and distributed training very easy. So to start, let's create Trainer instance for training on GPU for 50 epochs" ] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "3rslHEKeq9qy" }, - "outputs": [], "source": [ - "import pytorch_lightning as pl\n", + "import pytorch_lightning as pl\r\n", "trainer = pl.Trainer(devices=1, accelerator='gpu', max_epochs=50)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -760,25 +776,25 @@ "id": "pLbXg1swre_M" }, "source": [ - "Next, we instantiate and ASR model based on our ``citrinet_bpe.yaml`` file from the previous section.\n", + "Next, we instantiate and ASR model based on our ``citrinet_bpe.yaml`` file from the previous section.\r\n", "Note that this is a stage during which we also tell the model where our training and validation manifests are." 
] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "v7RnwRpprb2S" }, - "outputs": [], "source": [ - "# Update paths to dataset\n", - "params.model.train_ds.manifest_filepath = train_manifest\n", - "params.model.validation_ds.manifest_filepath = test_manifest\n", - "\n", - "# remove spec augment for this dataset\n", + "# Update paths to dataset\r\n", + "params.model.train_ds.manifest_filepath = train_manifest\r\n", + "params.model.validation_ds.manifest_filepath = test_manifest\r\n", + "\r\n", + "# remove spec augment for this dataset\r\n", "params.model.spec_augment.rect_masks = 0" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -786,21 +802,21 @@ "id": "2qLDHHOOx8T1" }, "source": [ - "Note the subtle difference in the model that we instantiate - `EncDecCTCModelBPE` instead of `EncDecCTCModel`. \n", - "\n", + "Note the subtle difference in the model that we instantiate - `EncDecCTCModelBPE` instead of `EncDecCTCModel`. \r\n", + "\r\n", "`EncDecCTCModelBPE` is nearly identical to `EncDecCTCModel` (it is in fact a subclass!) that simply adds support for subword tokenization." ] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "YVNc9IxdwXp7" }, - "outputs": [], "source": [ "first_asr_model = nemo_asr.models.EncDecCTCModelBPE(cfg=params.model, trainer=trainer)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -808,17 +824,15 @@ "id": "gJd4gE1uzCuO" }, "source": [ - "### Training: Monitoring Progress\n", + "### Training: Monitoring Progress\r\n", "We can now start Tensorboard to see how training went. Recall that WER stands for Word Error Rate and so the lower it is, the better." ] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "50qMnqagy8VM" }, - "outputs": [], "source": [ "try:\n", " from google import colab\n", @@ -832,7 +846,9 @@ " %tensorboard --logdir lightning_logs/\n", "else:\n", " print(\"To use tensorboard, please use this notebook in a Google Colab environment.\")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -845,15 +861,15 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "_iFfkFBTryQn" }, - "outputs": [], "source": [ - "# Start training!!!\n", + "# Start training!!!\r\n", "trainer.fit(first_asr_model)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -861,32 +877,32 @@ "id": "HQ2aSenF90hs" }, "source": [ - "Save the model easily along with the tokenizer using `save_to`. \n", - "\n", + "Save the model easily along with the tokenizer using `save_to`. \r\n", + "\r\n", "Later, we use `restore_from` to restore the model, it will also reinitialize the tokenizer !" ] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "6idt0dfO9z-S" }, - "outputs": [], "source": [ "first_asr_model.save_to(\"first_model.nemo\")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "RpHwCTk1-q4t" }, - "outputs": [], "source": [ "!ls -l -- *.nemo" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -894,8 +910,8 @@ "id": "VIupynXOxODi" }, "source": [ - "There we go! We've put together a full training pipeline for the model and trained it for 50 epochs.\n", - "\n", + "There we go! 
We've put together a full training pipeline for the model and trained it for 50 epochs.\r\n", + "\r\n", "If you'd like to save this model checkpoint for loading later (e.g. for fine-tuning, or for continuing training), you can simply call `first_asr_model.save_to()`. Then, to restore your weights, you can rebuild the model using the config (let's say you call it `first_asr_model_continued` this time) and call `first_asr_model_continued.restore_from()`." ] }, @@ -910,14 +926,14 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "wLR7PfEzxbO1" }, - "outputs": [], "source": [ "print(params.model.optim)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -925,25 +941,25 @@ "id": "7wfmZWf-xlNV" }, "source": [ - "### After training and hyper parameter tuning\n", - "\n", + "### After training and hyper parameter tuning\r\n", + "\r\n", "Let's say we wanted to change the learning rate. To do so, we can create a `new_opt` dict and set our desired learning rate, then call `.setup_optimization()` with the new optimization parameters." ] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "cH31LyZwxi_p" }, - "outputs": [], "source": [ - "import copy\n", - "new_opt = copy.deepcopy(params.model.optim)\n", - "new_opt.lr = 0.1\n", - "first_asr_model.setup_optimization(optim_config=new_opt);\n", + "import copy\r\n", + "new_opt = copy.deepcopy(params.model.optim)\r\n", + "new_opt.lr = 0.1\r\n", + "first_asr_model.setup_optimization(optim_config=new_opt);\r\n", "# And then you can invoke trainer.fit(first_asr_model)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -951,27 +967,27 @@ "id": "azH7U-K8x0rd" }, "source": [ - "## Inference\n", - "\n", - "Let's have a quick look at how one could run inference with NeMo's ASR model.\n", - "\n", + "## Inference\r\n", + "\r\n", + "Let's have a quick look at how one could run inference with NeMo's ASR model.\r\n", + "\r\n", "First, ``EncDecCTCModelBPE`` and its subclasses contain a handy ``transcribe`` method which can be used to simply obtain audio files' transcriptions. It also has batch_size argument to improve performance." 
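The save/restore discussion above showed `save_to` but not the restore side. As a minimal sketch, restoring the checkpoint saved earlier also brings back the tokenizer, after which `transcribe` works as usual; the audio path reuses a file from the AN4 data prepared in this notebook.

```python
# Restore the model (and its tokenizer) from the .nemo checkpoint saved above.
restored_model = nemo_asr.models.EncDecCTCModelBPE.restore_from("first_model.nemo")

# Transcribe a file from the AN4 data prepared earlier in this notebook.
audio = [data_dir + '/an4/wav/an4_clstk/mgah/cen2-mgah-b.wav']
print(restored_model.transcribe(paths2audio_files=audio, batch_size=1))
```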
] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "O64yk8C4xvTG" }, - "outputs": [], "source": [ "print(first_asr_model.transcribe(paths2audio_files=[data_dir + '/an4/wav/an4_clstk/mgah/cen2-mgah-b.wav',\n", " data_dir + '/an4/wav/an4_clstk/fmjd/cen7-fmjd-b.wav',\n", " data_dir + '/an4/wav/an4_clstk/fmjd/cen8-fmjd-b.wav',\n", " data_dir + '/an4/wav/an4_clstk/fkai/cen8-fkai-b.wav'],\n", " batch_size=4))" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -984,50 +1000,50 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "Eo2TcBkozlEG" }, - "outputs": [], "source": [ - "# Bigger batch-size = bigger throughput\n", - "params['model']['validation_ds']['batch_size'] = 16\n", - "\n", - "# Setup the test data loader and make sure the model is on GPU\n", - "first_asr_model.setup_test_data(test_data_config=params['model']['validation_ds'])\n", - "first_asr_model.cuda()\n", - "first_asr_model.eval()\n", - "\n", - "# We remove some preprocessing artifacts which benefit training\n", - "first_asr_model.preprocessor.featurizer.pad_to = 0\n", - "first_asr_model.preprocessor.featurizer.dither = 0.0\n", - "\n", - "# We will be computing Word Error Rate (WER) metric between our hypothesis and predictions.\n", - "# WER is computed as numerator/denominator.\n", - "# We'll gather all the test batches' numerators and denominators.\n", - "wer_nums = []\n", - "wer_denoms = []\n", - "\n", - "# Loop over all test batches.\n", - "# Iterating over the model's `test_dataloader` will give us:\n", - "# (audio_signal, audio_signal_length, transcript_tokens, transcript_length)\n", - "# See the AudioToCharDataset for more details.\n", - "for test_batch in first_asr_model.test_dataloader():\n", - " test_batch = [x.cuda() for x in test_batch]\n", - " targets = test_batch[2]\n", - " targets_lengths = test_batch[3] \n", - " log_probs, encoded_len, greedy_predictions = first_asr_model(\n", - " input_signal=test_batch[0], input_signal_length=test_batch[1]\n", - " )\n", - " # Notice the model has a helper object to compute WER\n", - " first_asr_model._wer.update(greedy_predictions, targets, targets_lengths)\n", - " _, wer_num, wer_denom = first_asr_model._wer.compute()\n", - " wer_nums.append(wer_num.detach().cpu().numpy())\n", - " wer_denoms.append(wer_denom.detach().cpu().numpy())\n", - "\n", - "# We need to sum all numerators and denominators first. 
Then divide.\n", +        "# Bigger batch-size = bigger throughput\r\n", +        "params['model']['validation_ds']['batch_size'] = 16\r\n", +        "\r\n", +        "# Setup the test data loader and make sure the model is on GPU\r\n", +        "first_asr_model.setup_test_data(test_data_config=params['model']['validation_ds'])\r\n", +        "first_asr_model.cuda()\r\n", +        "first_asr_model.eval()\r\n", +        "\r\n", +        "# We remove some preprocessing artifacts which benefit training\r\n", +        "first_asr_model.preprocessor.featurizer.pad_to = 0\r\n", +        "first_asr_model.preprocessor.featurizer.dither = 0.0\r\n", +        "\r\n", +        "# We will be computing the Word Error Rate (WER) metric between our hypotheses and the ground-truth transcripts.\r\n", +        "# WER is computed as numerator/denominator.\r\n", +        "# We'll gather all the test batches' numerators and denominators.\r\n", +        "wer_nums = []\r\n", +        "wer_denoms = []\r\n", +        "\r\n", +        "# Loop over all test batches.\r\n", +        "# Iterating over the model's `test_dataloader` will give us:\r\n", +        "# (audio_signal, audio_signal_length, transcript_tokens, transcript_length)\r\n", +        "# See the AudioToCharDataset for more details.\r\n", +        "for test_batch in first_asr_model.test_dataloader():\r\n", +        "    test_batch = [x.cuda() for x in test_batch]\r\n", +        "    targets = test_batch[2]\r\n", +        "    targets_lengths = test_batch[3]        \r\n", +        "    log_probs, encoded_len, greedy_predictions = first_asr_model(\r\n", +        "        input_signal=test_batch[0], input_signal_length=test_batch[1]\r\n", +        "    )\r\n", +        "    # Notice the model has a helper object to compute WER\r\n", +        "    first_asr_model._wer.update(greedy_predictions, targets, targets_lengths)\r\n", +        "    _, wer_num, wer_denom = first_asr_model._wer.compute()\r\n", +        "    wer_nums.append(wer_num.detach().cpu().numpy())\r\n", +        "    wer_denoms.append(wer_denom.detach().cpu().numpy())\r\n", +        "\r\n", +        "# We need to sum all numerators and denominators first. Then divide.\r\n", "print(f\"WER = {sum(wer_nums)/sum(wer_denoms)}\")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1044,22 +1060,22 @@ "id": "dtl9vEhx3MG7" }, "source": [ - "## Utilizing the underlying tokenizer\n", - "\n", + "## Utilizing the underlying tokenizer\r\n", + "\r\n", "Since the model has an underlying tokenizer, it would be nice to use it externally as well - say for getting the subwords of the transcript or to tokenize a dataset using the same tokenizer as the ASR model." ] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "fdXg21if2YRp" }, - "outputs": [], "source": [ - "tokenizer = first_asr_model.tokenizer\n", + "tokenizer = first_asr_model.tokenizer\r\n", "tokenizer" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1067,22 +1083,22 @@ "id": "Y96SOqpJ3kG3" }, "source": [ - "You can get the tokenizer's vocabulary using the `tokenizer.tokenizer.get_vocab()` method. \n", - "\n", + "You can get the tokenizer's vocabulary using the `tokenizer.tokenizer.get_vocab()` method. \r\n", + "\r\n", "ASR tokenizers will map the subword to an integer index in the vocabulary for convenience."
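As a cross-check on the manual numerator/denominator bookkeeping above, NeMo also exposes a functional helper, `word_error_rate`, which the LCS tutorial later in this patch imports from `nemo.collections.asr.metrics.wer`. A hedged sketch (`audio_files` and `refs` are placeholder lists, not variables defined in this notebook):

```python
from nemo.collections.asr.metrics.wer import word_error_rate

# audio_files: paths to wav files; refs: their ground-truth transcripts (parallel lists)
hyps = first_asr_model.transcribe(paths2audio_files=audio_files, batch_size=16)
print(f"WER = {word_error_rate(hypotheses=hyps, references=refs, use_cer=False)}")
```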
] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "F56_tIRM3g3f" }, - "outputs": [], "source": [ - "vocab = tokenizer.tokenizer.get_vocab()\n", + "vocab = tokenizer.tokenizer.get_vocab()\r\n", "vocab" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1095,51 +1111,51 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "-2tMVskF3uUf" }, - "outputs": [], "source": [ - "tokens = tokenizer.text_to_tokens(\"hello world\")\n", + "tokens = tokenizer.text_to_tokens(\"hello world\")\r\n", "tokens" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "CkxHkKQn4Q-E" }, - "outputs": [], "source": [ - "token_ids = tokenizer.text_to_ids(\"hello world\")\n", + "token_ids = tokenizer.text_to_ids(\"hello world\")\r\n", "token_ids" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "tpdoIrRt4Xim" }, - "outputs": [], "source": [ - "subwords = tokenizer.ids_to_tokens(token_ids)\n", + "subwords = tokenizer.ids_to_tokens(token_ids)\r\n", "subwords" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "wudNyONi4og8" }, - "outputs": [], "source": [ - "text = tokenizer.ids_to_text(token_ids)\n", + "text = tokenizer.ids_to_text(token_ids)\r\n", "text" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1147,29 +1163,29 @@ "id": "E35VBsbf4yWy" }, "source": [ - "## Model Improvements\n", - "\n", - "You already have all you need to create your own ASR model in NeMo, but there are a few more tricks that you can employ if you so desire. In this section, we'll briefly cover a few possibilities for improving an ASR model.\n", - "\n", - "### Data Augmentation\n", - "\n", - "There exist several ASR data augmentation methods that can increase the size of our training set.\n", - "\n", - "For example, we can perform augmentation on the spectrograms by zeroing out specific frequency segments (\"frequency masking\") or time segments (\"time masking\") as described by [SpecAugment](https://arxiv.org/abs/1904.08779), or zero out rectangles on the spectrogram as in [Cutout](https://arxiv.org/pdf/1708.04552.pdf). In NeMo, we can do all three of these by simply adding a `SpectrogramAugmentation` neural module. (As of now, it does not perform the time warping from the SpecAugment paper.)\n", - "\n", + "## Model Improvements\r\n", + "\r\n", + "You already have all you need to create your own ASR model in NeMo, but there are a few more tricks that you can employ if you so desire. In this section, we'll briefly cover a few possibilities for improving an ASR model.\r\n", + "\r\n", + "### Data Augmentation\r\n", + "\r\n", + "There exist several ASR data augmentation methods that can increase the size of our training set.\r\n", + "\r\n", + "For example, we can perform augmentation on the spectrograms by zeroing out specific frequency segments (\"frequency masking\") or time segments (\"time masking\") as described by [SpecAugment](https://arxiv.org/abs/1904.08779), or zero out rectangles on the spectrogram as in [Cutout](https://arxiv.org/pdf/1708.04552.pdf). In NeMo, we can do all three of these by simply adding a `SpectrogramAugmentation` neural module. 
(As of now, it does not perform the time warping from the SpecAugment paper.)\r\n", +        "\r\n", "Our toy model disables spectrogram augmentation because it is not significantly beneficial for the short demo." ] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "SMi6Bauy4Jhg" }, - "outputs": [], "source": [ "print(OmegaConf.to_yaml(first_asr_model._cfg['spec_augment']))" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1186,12 +1202,12 @@ "id": "fDTC4fXZ5QnT" }, "source": [ - "### Transfer learning\n", - "\n", - "Transfer learning is an important machine learning technique that uses a model’s knowledge of one task to perform better on another. Fine-tuning is one of the techniques to perform transfer learning. It is an essential part of the recipe for many state-of-the-art results where a base model is first pretrained on a task with abundant training data and then fine-tuned on different tasks of interest where the training data is less abundant or even scarce.\n", - "\n", - "In ASR you might want to do fine-tuning in multiple scenarios, for example, when you want to improve your model's performance on a particular domain (medical, financial, etc.) or accented speech. You can even transfer learn from one language to another! Check out [this paper](https://arxiv.org/abs/2005.04290) for examples.\n", - "\n", + "### Transfer learning\r\n", + "\r\n", + "Transfer learning is an important machine learning technique that uses a model’s knowledge of one task to perform better on another. Fine-tuning is one of the techniques to perform transfer learning. It is an essential part of the recipe for many state-of-the-art results where a base model is first pretrained on a task with abundant training data and then fine-tuned on different tasks of interest where the training data is less abundant or even scarce.\r\n", +        "\r\n", +        "In ASR, you might want to do fine-tuning in multiple scenarios, for example, when you want to improve your model's performance on a particular domain (medical, financial, etc.) or accented speech. You can even transfer learn from one language to another! Check out [this paper](https://arxiv.org/abs/2005.04290) for examples.\r\n", +        "\r\n", "Transfer learning with NeMo is simple. Let's demonstrate how we could fine-tune the model we trained earlier on AN4 data. (NOTE: this is a toy example.) And while we are at it, we will change the model's vocabulary to demonstrate how it's done." ] }, { "cell_type": "markdown", "metadata": { "id": "IN0LbDbY5YR1" }, "source": [ - "-----\n", + "-----\r\n", "First, let's create another tokenizer - perhaps using a larger vocabulary size than the small tokenizer we created earlier. Also, we swap out `sentencepiece` for a `BERT Word Piece` tokenizer."
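Referring back to the Data Augmentation section above: if you want masking enabled on your own model, one option is to attach a freshly configured `SpectrogramAugmentation` module. A minimal sketch, with illustrative (untuned) mask counts and widths; verify the constructor arguments against `nemo_asr.modules.SpectrogramAugmentation` in your NeMo version:

```python
import nemo.collections.asr as nemo_asr

# SpecAugment-style frequency/time masking plus Cutout-style rectangles
first_asr_model.spec_augmentation = nemo_asr.modules.SpectrogramAugmentation(
    freq_masks=2,   # number of frequency masks
    freq_width=15,  # maximum width of each frequency mask (mel bins)
    time_masks=5,   # number of time masks
    time_width=25,  # maximum width of each time mask (frames)
    rect_masks=5,   # number of Cutout rectangles
)
```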
] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "LFENXcXw48fc" }, - "outputs": [], "source": [ "!python ./scripts/process_asr_text_tokenizer.py \\\n", "  --manifest=\"{data_dir}/an4/train_manifest.json\" \\\n", @@ -1220,7 +1234,9 @@ "  --tokenizer=\"wpe\" \\\n", "  --no_lower_case \\\n", "  --log" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1233,14 +1249,14 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "QtyAB9fQ_qbj" }, - "outputs": [], "source": [ "restored_model = nemo_asr.models.EncDecCTCModelBPE.restore_from(\"./first_model.nemo\")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1253,11 +1269,9 @@ }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "4Ey9CUkJ5o56" }, - "outputs": [], "source": [ "# Check what kind of vocabulary/alphabet the model has right now\n", "print(restored_model.decoder.vocabulary)\n", "\n", "restored_model.change_vocabulary(\n", "    new_tokenizer_dir=data_dir + \"/tokenizers/an4/tokenizer_wpe_v64/\",\n", "    new_tokenizer_type=\"wpe\"\n", ")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1276,60 +1292,60 @@ "id": "UZ3sf2P26SiA" }, "source": [ - "After this, our decoder has completely changed, but our encoder (where most of the weights are) remained intact. Let's fine tune-this model for 20 epochs on AN4 dataset. We will also use the smaller learning rate from ``new_opt` (see the \"After Training\" section)`.\n", - "\n", + "After this, our decoder has completely changed, but our encoder (where most of the weights are) remained intact. Let's fine-tune this model for 20 epochs on the AN4 dataset. We will also use the smaller learning rate from `new_opt` (see the \"After Training\" section).\r\n", +        "\r\n", "**Note**: For this demonstration, we will also freeze the encoder to speed up finetuning (since both tokenizers are built on the same train set), but in general it should not be done for proper training on a new language (or on a different corpus than the original train corpus)."
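For completeness, the inverse of the `freeze()` call used in the next cell is a one-liner; once the frozen fine-tuning run is done, you would typically unfreeze before any further full-model training (a hypothetical follow-up step, not part of this demo):

```python
# Make the encoder trainable again after the frozen fine-tuning run
restored_model.encoder.unfreeze()
```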
] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "7m_CRtH46BjO" }, - "outputs": [], "source": [ - "# Use the smaller learning rate we set before\n", - "restored_model.setup_optimization(optim_config=new_opt)\n", - "\n", - "# Point to the data we'll use for fine-tuning as the training set\n", - "restored_model.setup_training_data(train_data_config=params['model']['train_ds'])\n", - "\n", - "# Point to the new validation data for fine-tuning\n", - "restored_model.setup_validation_data(val_data_config=params['model']['validation_ds'])\n", - "\n", - "# Freeze the encoder layers (should not be done for finetuning, only done for demo)\n", + "# Use the smaller learning rate we set before\r\n", + "restored_model.setup_optimization(optim_config=new_opt)\r\n", + "\r\n", + "# Point to the data we'll use for fine-tuning as the training set\r\n", + "restored_model.setup_training_data(train_data_config=params['model']['train_ds'])\r\n", + "\r\n", + "# Point to the new validation data for fine-tuning\r\n", + "restored_model.setup_validation_data(val_data_config=params['model']['validation_ds'])\r\n", + "\r\n", + "# Freeze the encoder layers (should not be done for finetuning, only done for demo)\r\n", "restored_model.encoder.freeze()" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "uCmUWZLD63d9" }, - "outputs": [], "source": [ - "# Load the TensorBoard notebook extension\n", - "if COLAB_ENV:\n", - " %load_ext tensorboard\n", - " %tensorboard --logdir lightning_logs/\n", - "else:\n", + "# Load the TensorBoard notebook extension\r\n", + "if COLAB_ENV:\r\n", + " %load_ext tensorboard\r\n", + " %tensorboard --logdir lightning_logs/\r\n", + "else:\r\n", " print(\"To use tensorboard, please use this notebook in a Google Colab environment.\")" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, "metadata": { "id": "fs2aK7xB6pAd" }, - "outputs": [], "source": [ - "# And now we can create a PyTorch Lightning trainer and call `fit` again.\n", - "trainer = pl.Trainer(devices=1, accelerator='gpu', max_epochs=20)\n", + "# And now we can create a PyTorch Lightning trainer and call `fit` again.\r\n", + "trainer = pl.Trainer(devices=1, accelerator='gpu', max_epochs=20)\r\n", "trainer.fit(restored_model)" - ] + ], + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -1346,22 +1362,22 @@ "id": "alykABQ3CNpf" }, "source": [ - "### Fast Training\n", - "\n", - "Last but not least, we could simply speed up training our model! If you have the resources, you can speed up training by splitting the workload across multiple GPUs. Otherwise (or in addition), there's always mixed precision training, which allows you to increase your batch size.\n", - "\n", - "You can use [PyTorch Lightning's Trainer object](https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html?highlight=Trainer) to handle mixed-precision and distributed training for you. 
Below are some examples of flags you would pass to the `Trainer` to use these features:\n", - "\n", - "```python\n", - "# Mixed precision:\n", - "trainer = pl.Trainer(amp_level='O1', precision=16)\n", - "\n", - "# Trainer with a distributed backend:\n", - "trainer = pl.Trainer(devices=2, num_nodes=2, accelerator='gpu', strategy='dp')\n", - "\n", - "# Of course, you can combine these flags as well.\n", - "```\n", - "\n", + "### Fast Training\r\n", + "\r\n", + "Last but not least, we could simply speed up training our model! If you have the resources, you can speed up training by splitting the workload across multiple GPUs. Otherwise (or in addition), there's always mixed precision training, which allows you to increase your batch size.\r\n", + "\r\n", + "You can use [PyTorch Lightning's Trainer object](https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html?highlight=Trainer) to handle mixed-precision and distributed training for you. Below are some examples of flags you would pass to the `Trainer` to use these features:\r\n", + "\r\n", + "```python\r\n", + "# Mixed precision:\r\n", + "trainer = pl.Trainer(amp_level='O1', precision=16)\r\n", + "\r\n", + "# Trainer with a distributed backend:\r\n", + "trainer = pl.Trainer(devices=2, num_nodes=2, accelerator='gpu', strategy='dp')\r\n", + "\r\n", + "# Of course, you can combine these flags as well.\r\n", + "```\r\n", + "\r\n", "Finally, have a look at [example scripts in NeMo repository](https://github.com/NVIDIA/NeMo/blob/stable/examples/asr/asr_ctc/speech_to_text_ctc_bpe.py) which can handle mixed precision and distributed training using command-line arguments." ] }, @@ -1371,33 +1387,33 @@ "id": "4uQGWtRJDF0O" }, "source": [ - "## Under the Hood\n", - "\n", - "NeMo is open-source and we do all our model development in the open, so you can inspect our code if you wish.\n", - "\n", - "In particular, ``nemo_asr.model.EncDecCTCModelBPE`` is an encoder-decoder model which is constructed using several ``Neural Modules`` taken from ``nemo_asr.modules.`` Here is what its forward pass looks like:\n", - "```python\n", - "def forward(self, input_signal, input_signal_length):\n", - " processed_signal, processed_signal_len = self.preprocessor(\n", - " input_signal=input_signal, length=input_signal_length,\n", - " )\n", - " # Spec augment is not applied during evaluation/testing\n", - " if self.spec_augmentation is not None and self.training:\n", - " processed_signal = self.spec_augmentation(input_spec=processed_signal)\n", - " encoded, encoded_len = self.encoder(audio_signal=processed_signal, length=processed_signal_len)\n", - " log_probs = self.decoder(encoder_output=encoded)\n", - " greedy_predictions = log_probs.argmax(dim=-1, keepdim=False)\n", - " return log_probs, encoded_len, greedy_predictions\n", - "```\n", - "Here:\n", - "\n", - "* ``self.preprocessor`` is an instance of ``nemo_asr.modules.AudioToMelSpectrogramPreprocessor``, which is a neural module that takes audio signal and converts it into a Mel-Spectrogram\n", - "* ``self.spec_augmentation`` - is a neural module of type ```nemo_asr.modules.SpectrogramAugmentation``, which implements data augmentation. 
\n", - "* ``self.encoder`` - is a convolutional Jasper, QuartzNet or Citrinet-like encoder of type ``nemo_asr.modules.ConvASREncoder``\n", - "* ``self.decoder`` - is a ``nemo_asr.modules.ConvASRDecoder`` which simply projects into the target alphabet (vocabulary).\n", - "\n", - "Also, ``EncDecCTCModelBPE`` uses the audio dataset class ``nemo_asr.data.AudioToBPEDataset`` and CTC loss implemented in ``nemo_asr.losses.CTCLoss``.\n", - "\n", + "## Under the Hood\r\n", + "\r\n", + "NeMo is open-source and we do all our model development in the open, so you can inspect our code if you wish.\r\n", + "\r\n", + "In particular, ``nemo_asr.model.EncDecCTCModelBPE`` is an encoder-decoder model which is constructed using several ``Neural Modules`` taken from ``nemo_asr.modules.`` Here is what its forward pass looks like:\r\n", + "```python\r\n", + "def forward(self, input_signal, input_signal_length):\r\n", + " processed_signal, processed_signal_len = self.preprocessor(\r\n", + " input_signal=input_signal, length=input_signal_length,\r\n", + " )\r\n", + " # Spec augment is not applied during evaluation/testing\r\n", + " if self.spec_augmentation is not None and self.training:\r\n", + " processed_signal = self.spec_augmentation(input_spec=processed_signal)\r\n", + " encoded, encoded_len = self.encoder(audio_signal=processed_signal, length=processed_signal_len)\r\n", + " log_probs = self.decoder(encoder_output=encoded)\r\n", + " greedy_predictions = log_probs.argmax(dim=-1, keepdim=False)\r\n", + " return log_probs, encoded_len, greedy_predictions\r\n", + "```\r\n", + "Here:\r\n", + "\r\n", + "* ``self.preprocessor`` is an instance of ``nemo_asr.modules.AudioToMelSpectrogramPreprocessor``, which is a neural module that takes audio signal and converts it into a Mel-Spectrogram\r\n", + "* ``self.spec_augmentation`` - is a neural module of type ```nemo_asr.modules.SpectrogramAugmentation``, which implements data augmentation. \r\n", + "* ``self.encoder`` - is a convolutional Jasper, QuartzNet or Citrinet-like encoder of type ``nemo_asr.modules.ConvASREncoder``\r\n", + "* ``self.decoder`` - is a ``nemo_asr.modules.ConvASRDecoder`` which simply projects into the target alphabet (vocabulary).\r\n", + "\r\n", + "Also, ``EncDecCTCModelBPE`` uses the audio dataset class ``nemo_asr.data.AudioToBPEDataset`` and CTC loss implemented in ``nemo_asr.losses.CTCLoss``.\r\n", + "\r\n", "You can use these and other neural modules (or create new ones yourself!) to construct new ASR models." ] }, @@ -1407,39 +1423,23 @@ "id": "5kKcSb7LDdI3" }, "source": [ - "# Further Reading/Watching:\n", - "\n", - "That's all for now! 
If you'd like to learn more about the topics covered in this tutorial, here are some resources that may interest you:\n", - "- [Stanford Lecture on ASR](https://www.youtube.com/watch?v=3MjIkWxXigM)\n", - "- [\"An Intuitive Explanation of Connectionist Temporal Classification\"](https://towardsdatascience.com/intuitively-understanding-connectionist-temporal-classification-3797e43a86c)\n", - "- [Explanation of CTC with Prefix Beam Search](https://medium.com/corti-ai/ctc-networks-and-language-models-prefix-beam-search-explained-c11d1ee23306)\n", - "- [Byte Pair Encoding](https://arxiv.org/abs/1508.07909)\n", - "- [Word Piece Encoding](https://static.googleusercontent.com/media/research.google.com/ja//pubs/archive/37842.pdf)\n", - "- [SentencePiece: A simple and language independent subword tokenizer and detokenizer for Neural Text Processing](https://www.aclweb.org/anthology/D18-2012/)\n", - "- [Jasper Paper](https://arxiv.org/abs/1904.03288)\n", - "- [QuartzNet paper](https://arxiv.org/abs/1910.10261)\n", - "- [SpecAugment Paper](https://arxiv.org/abs/1904.08779)\n", - "- [Explanation and visualization of SpecAugment](https://towardsdatascience.com/state-of-the-art-audio-data-augmentation-with-google-brains-specaugment-and-pytorch-d3d1a3ce291e)\n", - "- [Cutout Paper](https://arxiv.org/pdf/1708.04552.pdf)\n", - "- [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507)\n", + "# Further Reading/Watching:\r\n", + "\r\n", + "That's all for now! If you'd like to learn more about the topics covered in this tutorial, here are some resources that may interest you:\r\n", + "- [Stanford Lecture on ASR](https://www.youtube.com/watch?v=3MjIkWxXigM)\r\n", + "- [\"An Intuitive Explanation of Connectionist Temporal Classification\"](https://towardsdatascience.com/intuitively-understanding-connectionist-temporal-classification-3797e43a86c)\r\n", + "- [Explanation of CTC with Prefix Beam Search](https://medium.com/corti-ai/ctc-networks-and-language-models-prefix-beam-search-explained-c11d1ee23306)\r\n", + "- [Byte Pair Encoding](https://arxiv.org/abs/1508.07909)\r\n", + "- [Word Piece Encoding](https://static.googleusercontent.com/media/research.google.com/ja//pubs/archive/37842.pdf)\r\n", + "- [SentencePiece: A simple and language independent subword tokenizer and detokenizer for Neural Text Processing](https://www.aclweb.org/anthology/D18-2012/)\r\n", + "- [Jasper Paper](https://arxiv.org/abs/1904.03288)\r\n", + "- [QuartzNet paper](https://arxiv.org/abs/1910.10261)\r\n", + "- [SpecAugment Paper](https://arxiv.org/abs/1904.08779)\r\n", + "- [Explanation and visualization of SpecAugment](https://towardsdatascience.com/state-of-the-art-audio-data-augmentation-with-google-brains-specaugment-and-pytorch-d3d1a3ce291e)\r\n", + "- [Cutout Paper](https://arxiv.org/pdf/1708.04552.pdf)\r\n", + "- [Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507)\r\n", "- [Transfer Learning Blogpost](https://developer.nvidia.com/blog/jump-start-training-for-speech-recognition-models-with-nemo/)" ] } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "collapsed_sections": [], - "name": "ASR_with_Subword_Tokenization.ipynb", - "provenance": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} + ] +} \ No newline at end of file diff --git a/tutorials/asr/Buffered_Transducer_Inference_with_LCS_Merge.ipynb b/tutorials/asr/Buffered_Transducer_Inference_with_LCS_Merge.ipynb index 
5c00335d539f..9a6b7b2380cf 100644 --- a/tutorials/asr/Buffered_Transducer_Inference_with_LCS_Merge.ipynb +++ b/tutorials/asr/Buffered_Transducer_Inference_with_LCS_Merge.ipynb @@ -1,4 +1,22 @@ { + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "Buffered_Transducer_Inference_with_LCS_Merge.ipynb", + "provenance": [], + "collapsed_sections": [], + "toc_visible": true + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "language_info": { + "name": "python" + }, + "accelerator": "GPU" + }, "cells": [ { "cell_type": "code", @@ -51,9 +69,6 @@ }, { "cell_type": "markdown", - "metadata": { - "id": "cPuPBSU0ioJO" - }, "source": [ "# Buffered Transducer evaluation with Longest Common Subsequence Merge\n", "\n", @@ -66,51 +81,48 @@ "-----\n", "\n", "You may use this script [ASR Chunked Streaming Inference](https://github.com/NVIDIA/NeMo/blob/stable/examples/asr/asr_chunked_inference/rnnt/speech_to_text_buffered_infer_rnnt.py) to transcribe long audio files with Transducer models as well as experiment with both merge algorithms. \n" - ] + ], + "metadata": { + "id": "cPuPBSU0ioJO" + } }, { "cell_type": "markdown", - "metadata": { - "id": "ylQ3GwvX-n7R" - }, "source": [ "------\n", "\n", "**Note**: It is highly recommended to review the ``Streaming ASR`` tutorial for a good overview of how streaming/buffered inference works for CTC models and the underlying motivation of streaming ASR itself.\n", "\n", "------" - ] + ], + "metadata": { + "id": "ylQ3GwvX-n7R" + } }, { "cell_type": "markdown", - "metadata": { - "id": "2eDAsjyCi3lc" - }, "source": [ "# Prepare the dataset\n", "\n", "We will reuse the Librispeech dev-clean subset of [Mini Librispeech](https://www.openslr.org/31/). This time, we will not concatenate the audio segments but simply evaluate them in buffered mode over all the audio samples.\n", "\n", "**Note**: Conformer inference over the entire dev set will take an exorbitant amount of time on the CPU. We recommend the use of GPU for this tutorial." - ] + ], + "metadata": { + "id": "2eDAsjyCi3lc" + } }, { "cell_type": "markdown", - "metadata": { - "id": "fBYvC3lyjM7O" - }, "source": [ "## Download and prepare Mini Librispeech" - ] + ], + "metadata": { + "id": "fBYvC3lyjM7O" + } }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "cellView": "form", - "id": "LBiTnpz6iket" - }, - "outputs": [], "source": [ "#@title Prepare dataset and manifest for Libripeech Dev Clean subset.\n", "import os\n", @@ -127,38 +139,39 @@ " --data_sets dev_clean_2 \\\n", " --num_workers=10 \\\n", " --log" - ] + ], + "metadata": { + "cellView": "form", + "id": "LBiTnpz6iket" + }, + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "KHcy1Jbx8d9V" - }, - "outputs": [], "source": [ "manifest = os.path.join(os.getcwd(), \"datasets/mini-dev-clean/dev_clean_2.json\")\n", "print(\"Manifest path :\", manifest)" - ] + ], + "metadata": { + "id": "KHcy1Jbx8d9V" + }, + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", - "metadata": { - "id": "8g61qBwgkHiw" - }, "source": [ "# Prepare the model\n", "\n", "We will use the same Conformer Transducer model used in the `Buffered Transducer Inference` tutorial, which will provide a fair comparison between the proposed merge algorithms described here." 
- ] + ], + "metadata": { + "id": "8g61qBwgkHiw" + } }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "j9UHfsR1j-uf" - }, - "outputs": [], "source": [ "import torch\n", "import nemo.collections.asr as nemo_asr\n", @@ -176,26 +189,26 @@ "\n", "device = 'cuda' if torch.cuda.is_available() else 'cpu'\n", "device" - ] + ], + "metadata": { + "id": "j9UHfsR1j-uf" + }, + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, + "source": [ + "pretrained_model_name = \"stt_en_conformer_transducer_large\"" + ], "metadata": { "id": "CzkoimqKl07U" }, - "outputs": [], - "source": [ - "pretrained_model_name = \"stt_en_conformer_transducer_large\"" - ] + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "0LjtehkvlvKE" - }, - "outputs": [], "source": [ "# Clear up memory\n", "torch.cuda.empty_cache()\n", @@ -205,13 +218,15 @@ "# device = 'cpu' # You can transcribe even longer samples on the CPU, though it will take much longer !\n", "model = model.to(device)\n", "model.freeze()" - ] + ], + "metadata": { + "id": "0LjtehkvlvKE" + }, + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", - "metadata": { - "id": "OPZqcbNEnRkI" - }, "source": [ "# Longest Common Subsequence Merge\n", "\n", @@ -220,15 +235,13 @@ "In contrast to the `Middle Token` algorithm, which utilizes certain seconds of both past and future context in order to determine the \"middle tokens\" for that current buffer, the `LCS Merge` algorithm merges only consecutive buffers by selecting the overlap between the end of the `i-1`th buffer and the beginning of the `i`th buffer sub-word tokens, then removing the overlapped tokens from the `i`th buffer.\n", "\n", "While the idea is simple, since the same text can be represented by a different combination of sub-words, some additional expansion steps must be accounted for to account for imperfect alignment between two buffers." - ] + ], + "metadata": { + "id": "OPZqcbNEnRkI" + } }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "pEPLZyJP_zx2" - }, - "outputs": [], "source": [ "### Utility Functions ###\n", "def print_alignment(alignment):\n", @@ -263,13 +276,15 @@ "\n", " extras['alignment'] = alignment\n", " torch.save(extras, filepath)" - ] + ], + "metadata": { + "id": "pEPLZyJP_zx2" + }, + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", - "metadata": { - "id": "EwPrhOP2_7D2" - }, "source": [ "## Algorithm overview\n", "\n", @@ -289,15 +304,13 @@ "5. Perform a backward trace of the LCS suffix matrix to find detached sections to know the beginning index of slice and length of slice.\n", "6. Finally, check that beginning index of slice < max number of buffer chunks; if true, then slice off new buffer\n", " " - ] + ], + "metadata": { + "id": "EwPrhOP2_7D2" + } }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "AOBFADPdoJc8" - }, - "outputs": [], "source": [ "# Minimum number of tokens required to assign a LCS merge step, otherwise ignore and\n", "# select all i-1 and ith buffer tokens to merge.\n", @@ -523,13 +536,15 @@ " return result_idx, LCSuff\n", "\n", "\n" - ] + ], + "metadata": { + "id": "AOBFADPdoJc8" + }, + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", - "metadata": { - "id": "QeCGszfO_5cI" - }, "source": [ "## Merge Overview\n", "\n", @@ -550,15 +565,13 @@ "5. Slice off the new data (`i`th chunk)\n", "\n", "6. 
Merge the previous and current subset of the chunk and return the merged buffer." - ] + ], + "metadata": { + "id": "QeCGszfO_5cI" + } }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "_6r78fEm_48d" - }, - "outputs": [], "source": [ "def lcs_alignment_merge_buffer(buffer, data, delay, model, max_steps_per_timestep: int = 5, filepath: str = None):\n", " \"\"\"\n", @@ -593,28 +606,28 @@ " # Concat data to buffer\n", " buffer += data\n", " return buffer" - ] + ], + "metadata": { + "id": "_6r78fEm_48d" + }, + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", - "metadata": { - "id": "Bz31XOhLqu3z" - }, "source": [ "# LCS Merge algorithm as a basis for Buffered ASR\n", "\n", "Next, let us extend the previous `BatchedFrameASRRNNT` codebase for Buffered Transducer to incorporate the new merge algorithm.\n", "\n", "We will note that the vast majority of the code remains unchanged - only the `transcribe` function has been changed to utilize the new merge algorithm." - ] + ], + "metadata": { + "id": "Bz31XOhLqu3z" + } }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "wNgmc68nl1Ri" - }, - "outputs": [], "source": [ "from nemo.collections.asr.parts.utils import streaming_utils\n", "from torch.utils.data import DataLoader\n", @@ -708,29 +721,28 @@ " for idx in range(self.batch_size):\n", " output.append(self.greedy_merge(self.unmerged[idx]))\n", " return output\n" - ] + ], + "metadata": { + "id": "wNgmc68nl1Ri" + }, + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", - "metadata": { - "id": "0fmD9goyrmEb" - }, "source": [ "# Comparing \"Middle Token\" and \"LCS Merge\"\n", "\n", "While we propose the two algorithms - `Middle Token` and `LCS Merge`, we would recommend using either algorithm in the appropriate circumstances. The `Middle Token` algorithm performs well in general, and its mistakes are often fewer than the `LCS Merge` algorithm but requires future context, which may increase latency by a small amount. There are also cases where `LCS Merge` may select better alignments and result in slightly better scores for some audio samples.\n", "\n", "In general, we propose these approaches to discuss further and research merge algorithms that show some trade-off between latency and accuracy." 
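To build intuition for the overlap-removal idea behind `LCS Merge` (setting aside the sub-word expansion handling implemented above), here is a deliberately naive sketch that merges two consecutive token buffers by dropping the longest prefix of the new buffer that exactly matches a suffix of the previous one. It is illustrative only, and much simpler than what `LongestCommonSubsequenceBatchedFrameASRRNNT` actually does:

```python
def toy_overlap_merge(prev_tokens, new_tokens):
    # Drop the longest prefix of new_tokens that duplicates a suffix of prev_tokens
    for k in range(min(len(prev_tokens), len(new_tokens)), 0, -1):
        if prev_tokens[-k:] == new_tokens[:k]:
            return prev_tokens + new_tokens[k:]
    return prev_tokens + new_tokens  # no overlap found: plain concatenation

# Buffer i-1 ends with "the cat"; buffer i re-decodes "the cat" before adding "sat"
print(toy_overlap_merge(["so", "the", "cat"], ["the", "cat", "sat"]))
# -> ['so', 'the', 'cat', 'sat']
```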
- ] + ], + "metadata": { + "id": "0fmD9goyrmEb" + } }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "cellView": "form", - "id": "FlIGOc_yl4aQ" - }, - "outputs": [], "source": [ "#@title Change Decoding Strategy for Buffered Inference\n", "# Change Decoding Config\n", @@ -743,16 +755,16 @@ " decoding_cfg.fused_batch_size = -1 # temporarily stop fused batch during inference.\n", "\n", "model.change_decoding_strategy(decoding_cfg)" - ] - }, - { - "cell_type": "code", - "execution_count": null, + ], "metadata": { "cellView": "form", - "id": "laPBH4eJsiJk" + "id": "FlIGOc_yl4aQ" }, - "outputs": [], + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", "source": [ "#@title Helper methods to transcribe audio in buffered mode\n", "\n", @@ -804,27 +816,27 @@ " \n", " print(\"Finished transcribing audio files\")\n", " return hyps" - ] + ], + "metadata": { + "id": "laPBH4eJsiJk", + "cellView": "form" + }, + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", - "metadata": { - "id": "csSNtwziubeM" - }, "source": [ "## Select data subset\n", "\n", "On the GPU, it would take a few minutes to perform inference for the entire dataset, but on the CPU, it would take quite a long time. While the defaults will exist for the whole dataset, if only the CPU is available for some reason, we encourage you to subsample the dataset." - ] + ], + "metadata": { + "id": "csSNtwziubeM" + } }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "cellView": "form", - "id": "QoZ0fG8zuf5E" - }, - "outputs": [], "source": [ "#@title Manifest helper\n", "import json\n", @@ -856,27 +868,28 @@ " \n", " print(f\"Prepared subset manifest with {len(sub_manifest)} samples.\")\n", " return sub_manifest" - ] + ], + "metadata": { + "cellView": "form", + "id": "QoZ0fG8zuf5E" + }, + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "I8dnQRL6umrO" - }, - "outputs": [], "source": [ "manifest_data = read_manifest(manifest)\n", "print(f\"Read {len(manifest_data)} samples from manifest {manifest}\")" - ] + ], + "metadata": { + "id": "I8dnQRL6umrO" + }, + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "zZOTSFHsuswe" - }, - "outputs": [], "source": [ "num_samples = len(manifest_data)\n", "\n", @@ -884,26 +897,26 @@ "sub_manifest = subset_manifest(manifest_data, num_samples)\n", "audio_filepaths = [sample['audio_filepath'] for sample in sub_manifest]\n", "ground_texts = [sample['text'] for sample in sub_manifest]" - ] + ], + "metadata": { + "id": "zZOTSFHsuswe" + }, + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", - "metadata": { - "id": "GURl8G2Bwlad" - }, "source": [ "## Buffered Inference arguments\n", "\n", "Below we detail some critical arguments for buffered transducer inference. Note that the primary difference between streaming and buffered inference would be the chunk length, with larger values contributing to a lower word error rate but higher latency. 
" - ] + ], + "metadata": { + "id": "GURl8G2Bwlad" + } }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "gF86J9Knwpe_" - }, - "outputs": [], "source": [ "chunk_len_in_secs: float = 8.0\n", "context_len_in_secs: float = 1.0\n", @@ -915,123 +928,123 @@ " \n", "##########################################################################\n", "buffer_len_in_secs = chunk_len_in_secs + 2* context_len_in_secs\n" - ] + ], + "metadata": { + "id": "gF86J9Knwpe_" + }, + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", - "metadata": { - "id": "C1s93TbcwZNt" - }, "source": [ "## Baseline: Middle Token Predictions\n", "\n", "Now compute the transcriptions over the data subset using the baseline algorithm - `Middle Token`. " - ] + ], + "metadata": { + "id": "C1s93TbcwZNt" + } }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "PsqjMkeEu4oK" - }, - "outputs": [], "source": [ "asr_middle = streaming_utils.BatchedFrameASRRNNT(model, chunk_len_in_secs, buffer_len_in_secs,\n", " batch_size=batch_size, max_steps_per_timestep=max_steps_per_timestep)" - ] + ], + "metadata": { + "id": "PsqjMkeEu4oK" + }, + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, + "source": [ + "middle_transcripts = transcribe_buffers(asr_middle, audio_filepaths, chunk_len_in_secs, buffer_len_in_secs, model_stride)" + ], "metadata": { "id": "sNQNDjroxWb8" }, - "outputs": [], - "source": [ - "middle_transcripts = transcribe_buffers(asr_middle, audio_filepaths, chunk_len_in_secs, buffer_len_in_secs, model_stride)" - ] + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "ENGTX70QzcrB" - }, - "outputs": [], "source": [ "from nemo.collections.asr.metrics.wer import word_error_rate\n", "\n", "wer_middle = word_error_rate(middle_transcripts, ground_texts, use_cer=False)\n", "print(\"Middle token algorithm WER :\", wer_middle)" - ] + ], + "metadata": { + "id": "ENGTX70QzcrB" + }, + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", - "metadata": { - "id": "Z3hCweGDy12t" - }, "source": [ "## LCS Merge Predictions\n", "\n", "Next, let us compute the transcriptions over the data subset using the `LCS Merge` algorithm." 
- ] + ], + "metadata": { + "id": "Z3hCweGDy12t" + } }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "E7DBDeBPx4cJ" - }, - "outputs": [], "source": [ "asr_lcs = LongestCommonSubsequenceBatchedFrameASRRNNT(model, chunk_len_in_secs, buffer_len_in_secs,\n", " batch_size=batch_size, max_steps_per_timestep=max_steps_per_timestep,\n", " alignment_basepath=lcs_alignments_path)" - ] + ], + "metadata": { + "id": "E7DBDeBPx4cJ" + }, + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, + "source": [ + "lcs_transcripts = transcribe_buffers(asr_lcs, audio_filepaths, chunk_len_in_secs, buffer_len_in_secs, model_stride)" + ], "metadata": { "id": "BQo9TNSyzPfv" }, - "outputs": [], - "source": [ - "lcs_transcripts = transcribe_buffers(asr_lcs, audio_filepaths, chunk_len_in_secs, buffer_len_in_secs, model_stride)" - ] + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "IXW6I3hDzT6I" - }, - "outputs": [], "source": [ "wer_lcs = word_error_rate(lcs_transcripts, ground_texts, use_cer=False)\n", "print(\"LCS algorithm WER :\", wer_lcs)" - ] + ], + "metadata": { + "id": "IXW6I3hDzT6I" + }, + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", - "metadata": { - "id": "MjGfb1x00egs" - }, "source": [ "# Compare the text predictions from the two merge algorithms\n", "\n", "Depending on the data subset chosen (or randomly sampled), the WER for this algorithm may be higher or lower than the baseline. Note that if you select all the samples in the dataset, then the WER of this method is slightly higher than the baseline.\n", "\n", "We will do a more in-depth analysis of the failure cases below." - ] + ], + "metadata": { + "id": "MjGfb1x00egs" + } }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "80NvUa1Y0dk7" - }, - "outputs": [], "source": [ "def compare_algorithms(ground_truth, middle_transcripts, lcs_transcripts, use_cer=False):\n", " worse = []\n", @@ -1055,38 +1068,37 @@ " print(\"Number of samples LCS merge was better than middle ground :\", len(better))\n", " print(\"Number of samples LCS merge was worse than middle ground :\", len(worse))\n", " return same, better, worse" - ] + ], + "metadata": { + "id": "80NvUa1Y0dk7" + }, + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, + "source": [ + "both_same, lcs_better, lcs_worse = compare_algorithms(ground_texts, middle_transcripts, lcs_transcripts, use_cer=False)" + ], "metadata": { "id": "A-NIFnjo0KB5" }, - "outputs": [], - "source": [ - "both_same, lcs_better, lcs_worse = compare_algorithms(ground_texts, middle_transcripts, lcs_transcripts, use_cer=False)" - ] + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", - "metadata": { - "id": "awZNviRC5C-O" - }, "source": [ "# EXTRA: Compare the alignment matrices of LCS\n", "\n", "Over the entire dataset, there would be some samples where the `LCS Merge` algorithm did better than the `Middle Token` algorithm and vice-versa. 
Below, we will take a sample-level look at such cases, and since the `LCS Merge` algorithm is an alignment-based technique, we can visualize the alignment itself and determine in which cases it failed and where in the alignment the error originated.\n" - ] + ], + "metadata": { + "id": "awZNviRC5C-O" + } }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "cellView": "form", - "id": "SiW4xw424lB1" - }, - "outputs": [], "source": [ "#@title LCS Alignment helper functions\n", "\n", @@ -1185,28 +1197,29 @@ "    print()\n", "\n", "    " - ] + ], + "metadata": { + "cellView": "form", + "id": "SiW4xw424lB1" + }, + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", - "metadata": { - "id": "_DEYtkP46Srw" - }, "source": [ "## Worse alignment\n", "\n", "Let us search for a sample where the `LCS Merge` did worse than the `Middle Token` algorithm. \n", "\n", "Such cases are important to analyze because it is visually apparent where the alignment went wrong. We can determine if there could be an extension to this algorithm to further improve such cases.\n" - ] + ], + "metadata": { + "id": "_DEYtkP46Srw" + } }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "rt7c-qoH5a30" - }, - "outputs": [], "source": [ "worse_idx = find_first_sample_with_alignment(lcs_alignments_path, lcs_worse, start_idx=0)\n", "worse_sample = lcs_worse[worse_idx]\n", "\n", "print(\"A sample where LCS did worse than Middle Token merge algorithm :\")\n", "print(\"The texts are structured as (Ground Truth, Middle Token, LCS Merge)\")\n", "worse_sample" - ] + ], + "metadata": { + "id": "rt7c-qoH5a30" + }, + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, + "source": [ + "display_alignment_merge(lcs_alignments_path, worse_sample, print_xy_token_ids=False)" + ], "metadata": { "id": "_wBepfcH7kAK" }, - "outputs": [], - "source": [ - "display_alignment_merge(lcs_alignments_path, worse_sample, print_xy_token_ids=False)" - ] + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", - "metadata": { - "id": "Z-xHYGIEJXBx" - }, "source": [ "## Better alignment\n", "\n", "Next, let us search for a sample where the `LCS Merge` did better than the `Middle Token` algorithm. \n", "\n", "Such cases are also essential to analyze because it is visually apparent where the alignment was better. We can determine if we can improve the `Middle Token` algorithm."
- ] + ], + "metadata": { + "id": "Z-xHYGIEJXBx" + } }, { "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "AHBx3QpQE5OX" - }, - "outputs": [], "source": [ "better_idx = find_first_sample_with_alignment(lcs_alignments_path, lcs_better, start_idx=0)\n", "better_sample = lcs_better[better_idx]\n", "\n", "print(\"A sample where LCS did better than Middle Token merge algorithm :\")\n", "print(\"The texts are structured as (Ground Truth, Middle Token, LCS Merge)\")\n", "better_sample" - ] + ], + "metadata": { + "id": "AHBx3QpQE5OX" + }, + "execution_count": null, + "outputs": [] }, { "cell_type": "code", - "execution_count": null, + "source": [ + "display_alignment_merge(lcs_alignments_path, better_sample)" + ], "metadata": { "id": "urjYWVGfJhlU" }, - "outputs": [], - "source": [ - "display_alignment_merge(lcs_alignments_path, better_sample)" - ] + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", - "metadata": { - "id": "GRFifXuROpzg" - }, "source": [ "# Final notes\n", "\n", "Following the [Buffered Transducer Inference](https://github.com/NVIDIA/NeMo/blob/stable/tutorials/asr/Buffered_Transducer_Inference.ipynb) tutorial and designing a token merge algorithm that can be a simple extension to the baseline `Middle Token` algorithm, we see that there are cases where both algorithms have their uses. \n", "\n", "To expand our research effort on developing more sophisticated streaming / buffered transducer inference methods, we encourage users to try these algorithms in script format for efficient inference on large datasets - available at [ASR Chunked Streaming Inference](https://github.com/NVIDIA/NeMo/blob/stable/examples/asr/asr_chunked_inference/rnnt/speech_to_text_buffered_infer_rnnt.py).\n" - ] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "collapsed_sections": [], - "name": "Buffered_Transducer_Inference_with_LCS_Merge.ipynb", - "provenance": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3", - "name": "python3" - }, - "language_info": { - "name": "python" + ], + "metadata": { + "id": "GRFifXuROpzg" + } } - }, - "nbformat": 4, - "nbformat_minor": 0 -} + ] +} \ No newline at end of file diff --git a/tutorials/nlp/MegatronBert_export.ipynb b/tutorials/nlp/MegatronBert_export.ipynb index 54ad754e4617..f925d2bc59b0 100644 --- a/tutorials/nlp/MegatronBert_export.ipynb +++ b/tutorials/nlp/MegatronBert_export.ipynb @@ -7,7 +7,7 @@ "metadata": {}, "outputs": [], "source": [ - "BRANCH='r1.13.0'" + "BRANCH='main'" ] }, { diff --git a/tutorials/nlp/Multitask_Prompt_and_PTuning.ipynb b/tutorials/nlp/Multitask_Prompt_and_PTuning.ipynb index c442913ec8ae..512a38bc90cc 100644 --- a/tutorials/nlp/Multitask_Prompt_and_PTuning.ipynb +++ b/tutorials/nlp/Multitask_Prompt_and_PTuning.ipynb @@ -7,7 +7,7 @@ "metadata": {}, "outputs": [], "source": [ - "BRANCH='r1.13.0'" + "BRANCH='main'" ] }, { diff --git a/tutorials/nlp/Question_Answering_Squad.ipynb b/tutorials/nlp/Question_Answering_Squad.ipynb deleted file mode 100755 index 532e82f9c216..000000000000 --- a/tutorials/nlp/Question_Answering_Squad.ipynb +++ /dev/null @@ -1,725 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "uRLPr0TnIAHO" - }, - "outputs": [], - "source": [ - "BRANCH = 'main'" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "o_0K1lsW1dj9" - }, - "outputs": [], - "source": [ - "\"\"\"\n", - "You can run either this notebook locally (if you have 
all the dependencies and a GPU) or on Google Colab.\n", - "\n", - "Instructions for setting up Colab are as follows:\n", - "1. Open a new Python 3 notebook.\n", - "2. Import this notebook from GitHub (File -> Upload Notebook -> \"GITHUB\" tab -> copy/paste GitHub URL)\n", - "3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select \"GPU\" for hardware accelerator)\n", - "4. Run this cell to set up dependencies.\n", - "\"\"\"\n", - "# If you're using Google Colab and not running locally, run this cell\n", - "\n", - "# install NeMo\n", - "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "dzqD2WDFOIN-" - }, - "outputs": [], - "source": [ - "from nemo.utils.exp_manager import exp_manager\n", - "from nemo.collections import nlp as nemo_nlp\n", - "\n", - "import os\n", - "import wget \n", - "import torch\n", - "import pytorch_lightning as pl\n", - "from omegaconf import OmegaConf" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "daYw_Xll2ZR9" - }, - "source": [ - "# Task Description\n", - "Given a question and a context both in natural language, predict the span within the context with a start and end position which indicates the answer to the question.\n", - "For every word in our training dataset we’re going to predict:\n", - "- likelihood this word is the start of the span \n", - "- likelihood this word is the end of the span \n", - "\n", - "We are using a pretrained [BERT](https://arxiv.org/pdf/1810.04805.pdf) encoder with 2 span prediction heads for prediction start and end position of the answer. The span predictions are token classifiers consisting of a single linear layer. " - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ZnuziSwJ1yEB" - }, - "source": [ - "# Dataset\n", - "This model expects the dataset to be in [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/) format, e.g. a JSON file for each dataset split. \n", - "In the following we will show example for a training file. Each title has one or multiple paragraph entries, each consisting of the text - \"context\", and question-answer entries. Each question-answer entry has:\n", - "* a question\n", - "* a globally unique id\n", - "* a boolean flag \"is_impossible\" which shows if the question is answerable or not\n", - "* in case the question is answerable one answer entry, which contains the text span and its starting character index in the context. If not answerable, the \"answers\" list is empty\n", - "\n", - "The evaluation files (for validation and testing) follow the above format except for it can provide more than one answer to the same question. \n", - "The inference file follows the above format except for it does not require the \"answers\" and \"is_impossible\" keywords.\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "TXFORGBv2Jqu" - }, - "source": [ - "\n", - "\n", - "```\n", - "{\n", - " \"data\": [\n", - " {\n", - " \"title\": \"Super_Bowl_50\", \n", - " \"paragraphs\": [\n", - " {\n", - " \"context\": \"Super Bowl 50 was an American football game to determine the champion of the National Football League (NFL) for the 2015 season. The American Football Conference (AFC) champion Denver Broncos defeated the National Football Conference (NFC) champion Carolina Panthers 24\\u201310 to earn their third Super Bowl title. 
The game was played on February 7, 2016, at Levi's Stadium in the San Francisco Bay Area at Santa Clara, California. As this was the 50th Super Bowl, the league emphasized the \\\"golden anniversary\\\" with various gold-themed initiatives, as well as temporarily suspending the tradition of naming each Super Bowl game with Roman numerals (under which the game would have been known as \\\"Super Bowl L\\\"), so that the logo could prominently feature the Arabic numerals 50.\", \n", - " \"qas\": [\n", - " {\n", - " \"question\": \"Where did Super Bowl 50 take place?\", \n", - " \"is_impossible\": \"false\", \n", - " \"id\": \"56be4db0acb8001400a502ee\", \n", - " \"answers\": [\n", - " {\n", - " \"answer_start\": \"403\", \n", - " \"text\": \"Santa Clara, California\"\n", - " }\n", - " ]\n", - " },\n", - " {\n", - " \"question\": \"What was the winning score of the Super Bowl 50?\", \n", - " \"is_impossible\": \"true\", \n", - " \"id\": \"56be4db0acb8001400a502ez\", \n", - " \"answers\": [\n", - " ]\n", - " }\n", - " ]\n", - " }\n", - " ]\n", - " }\n", - " ]\n", - "}\n", - "...\n", - "```\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "SL58EWkd2ZVb" - }, - "source": [ - "## Download the data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "THi6s1Qx2G1k" - }, - "source": [ - "In this notebook we are going download the [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/) dataset to showcase how to do training and inference. There are two datasets, SQuAD1.0 and SQuAD2.0. SQuAD 1.1, the previous version of the SQuAD dataset, contains 100,000+ question-answer pairs on 500+ articles. SQuAD2.0 dataset combines the 100,000 questions in SQuAD1.1 with over 50,000 unanswerable questions written adversarially by crowdworkers to look similar to answerable ones. \n", - "\n", - "\n", - "To download both datasets, we use [NeMo/examples/nlp/question_answering/get_squad.py](https://github.com/NVIDIA/NeMo/blob/stable/examples/nlp/question_answering/get_squad.py). \n", - "\n", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "tv3qXTTR_hBk" - }, - "outputs": [], - "source": [ - "# set the following paths\n", - "DATA_DIR = \"PATH_TO_DATA\"\n", - "WORK_DIR = \"PATH_TO_CHECKPOINTS_AND_LOGS\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "qcz3Djem_hBn" - }, - "outputs": [], - "source": [ - "## download get_squad.py script to download and preprocess the SQuAD data\n", - "os.makedirs(WORK_DIR, exist_ok=True)\n", - "if not os.path.exists(WORK_DIR + '/get_squad.py'):\n", - " print('Downloading get_squad.py...')\n", - " wget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/examples/nlp/question_answering/get_squad.py', WORK_DIR)\n", - "else:\n", - " print ('get_squad.py already exists')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "mpzsC41t_hBq" - }, - "outputs": [], - "source": [ - "# download and preprocess the data\n", - "! 
python $WORK_DIR/get_squad.py --destDir $DATA_DIR" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "m_HLLl6t_hBs" - }, - "source": [ - "after execution of the above cell, your data folder will contain a subfolder \"squad\" the following 4 files for training and evaluation\n", - "- v1.1/train-v1.1.json\n", - "- v1.1/dev-v1.1.json\n", - "- v2.0/train-v2.0.json\n", - "- v2.0/dev-v2.0.json" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "qYHcfxPL_hBt" - }, - "outputs": [], - "source": [ - "! ls -LR {DATA_DIR}/squad" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "bdpikZVreLlI" - }, - "source": [ - "## Data preprocessing\n", - "\n", - "The input into the model is the concatenation of two tokenized sequences:\n", - "\" [CLS] query [SEP] context [SEP]\".\n", - "This is the tokenization used for BERT, i.e. [WordPiece](https://arxiv.org/pdf/1609.08144.pdf) Tokenizer, which uses the [Google's BERT vocabulary](https://github.com/google-research/bert). This tokenizer is configured with `model.tokenizer.tokenizer_name=bert-base-uncased` and is automatically instantiated using [Huggingface](https://huggingface.co/)'s API. \n", - "The benefit of this tokenizer is that this is compatible with a pretrained BERT model, from which we can finetune instead of training the question answering model from scratch. However, we also support other tokenizers, such as `model.tokenizer.tokenizer_name=sentencepiece`. Unlike the BERT WordPiece tokenizer, the [SentencePiece](https://github.com/google/sentencepiece) tokenizer model needs to be first created from a text file.\n", - "See [02_NLP_Tokenizers.ipynb](https://colab.research.google.com/github/NVIDIA/NeMo/blob/stable/tutorials/nlp/02_NLP_Tokenizers.ipynb) for more details on how to use NeMo Tokenizers." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "0q7Y7nyW_hBv" - }, - "source": [ - "# Data and Model Parameters\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "B0b0Tn8M_hBv" - }, - "source": [ - "Note, this is only an example to showcase usage and is not optimized for accuracy. In the following, we will download and adjust the model configuration to create a toy example, where we only use a small fraction of the original dataset. \n", - "\n", - "In order to train the full SQuAD model, leave the model parameters from the configuration file unchanged. This sets NUM_SAMPLES=-1 to use the entire dataset, which will slow down performance significantly. We recommend to use bash script and multi-GPU to accelerate this. 
\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "n8HZrDmr12_-" - }, - "outputs": [], - "source": [ - "# This is the model configuration file that we will download, do not change this\n", - "MODEL_CONFIG = \"question_answering_squad_config.yaml\"\n", - "\n", - "# model parameters, play with these\n", - "BATCH_SIZE = 12\n", - "MAX_SEQ_LENGTH = 384\n", - "# specify BERT-like model, you want to use\n", - "PRETRAINED_BERT_MODEL = \"bert-base-uncased\"\n", - "TOKENIZER_NAME = \"bert-base-uncased\" # tokenizer name\n", - "\n", - "# Number of data examples used for training, validation, test and inference\n", - "TRAIN_NUM_SAMPLES = VAL_NUM_SAMPLES = TEST_NUM_SAMPLES = 5000 \n", - "INFER_NUM_SAMPLES = 5\n", - "\n", - "TRAIN_FILE = f\"{DATA_DIR}/squad/v1.1/train-v1.1.json\"\n", - "VAL_FILE = f\"{DATA_DIR}/squad/v1.1/dev-v1.1.json\"\n", - "TEST_FILE = f\"{DATA_DIR}/squad/v1.1/dev-v1.1.json\"\n", - "INFER_FILE = f\"{DATA_DIR}/squad/v1.1/dev-v1.1.json\"\n", - "\n", - "INFER_PREDICTION_OUTPUT_FILE = \"output_prediction.json\"\n", - "INFER_NBEST_OUTPUT_FILE = \"output_nbest.json\"\n", - "\n", - "# training parameters\n", - "LEARNING_RATE = 0.00003\n", - "\n", - "# number of epochs\n", - "MAX_EPOCHS = 1" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "daludzzL2Jba" - }, - "source": [ - "# Model Configuration" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "_whKCxfTMo6Y" - }, - "source": [ - "The model is defined in a config file which declares multiple important sections. They are:\n", - "- **model**: All arguments that will relate to the Model - language model, span prediction, optimizer and schedulers, datasets and any other related information\n", - "\n", - "- **trainer**: Any argument to be passed to PyTorch Lightning" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "T1gA8PsJ13MJ" - }, - "outputs": [], - "source": [ - "# download the model's default configuration file \n", - "config_dir = WORK_DIR + '/configs/'\n", - "os.makedirs(config_dir, exist_ok=True)\n", - "if not os.path.exists(config_dir + MODEL_CONFIG):\n", - " print('Downloading config file...')\n", - " wget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/examples/nlp/question_answering/conf/{MODEL_CONFIG}', config_dir)\n", - "else:\n", - " print ('config file is already exists')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "mX3KmWMvSUQw" - }, - "outputs": [], - "source": [ - "# this line will print the entire default config of the model\n", - "config_path = f'{WORK_DIR}/configs/{MODEL_CONFIG}'\n", - "print(config_path)\n", - "config = OmegaConf.load(config_path)\n", - "print(OmegaConf.to_yaml(config))" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ZCgWzNBkaQLZ" - }, - "source": [ - "## Setting up data within the config\n", - "\n", - "Among other things, the config file contains dictionaries called dataset, train_ds and validation_ds, test_ds. These are configurations used to setup the Dataset and DataLoaders of the corresponding config.\n", - "\n", - "Specify data paths using `model.train_ds.file`, `model.valuation_ds.file` and `model.test_ds.file`.\n", - "\n", - "Let's now add the data paths to the config." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "LQHCJN-ZaoLp" - }, - "outputs": [], - "source": [ - "config.model.train_ds.file = TRAIN_FILE\n", - "config.model.validation_ds.file = VAL_FILE\n", - "config.model.test_ds.file = TEST_FILE\n", - "\n", - "config.model.train_ds.num_samples = TRAIN_NUM_SAMPLES\n", - "config.model.validation_ds.num_samples = VAL_NUM_SAMPLES\n", - "config.model.test_ds.num_samples = TEST_NUM_SAMPLES\n", - "\n", - "config.model.tokenizer.tokenizer_name = TOKENIZER_NAME" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "nB96-3sTc3yk" - }, - "source": [ - "# Building the PyTorch Lightning Trainer\n", - "\n", - "NeMo models are primarily PyTorch Lightning modules - and therefore are entirely compatible with the PyTorch Lightning ecosystem!\n", - "\n", - "Let's first instantiate a Trainer object!" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "knF6QeQQdMrH" - }, - "outputs": [], - "source": [ - "# let's modify some trainer configs\n", - "# check if a GPU is available and use it\n", - "accelerator = 'gpu' if torch.cuda.is_available() else 'cpu'\n", - "config.trainer.devices = 1\n", - "config.trainer.accelerator = accelerator\n", - "config.trainer.precision = 16 if torch.cuda.is_available() else 32\n", - "\n", - "# For mixed precision training, use precision=16 and amp_level=O1\n", - "\n", - "config.trainer.max_epochs = MAX_EPOCHS\n", - "\n", - "# Remove distributed training flags if only running on a single GPU or CPU\n", - "config.trainer.strategy = None\n", - "\n", - "print(\"Trainer config - \\n\")\n", - "print(OmegaConf.to_yaml(config.trainer))\n", - "\n", - "trainer = pl.Trainer(**config.trainer)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "8IlEMdVxdr6p" - }, - "source": [ - "# Setting up a NeMo Experiment\n", - "\n", - "NeMo has an experiment manager that handles logging and checkpointing for us, so let's use it!" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "8uztqGAmdrYt" - }, - "outputs": [], - "source": [ - "config.exp_manager.exp_dir = WORK_DIR\n", - "exp_dir = exp_manager(trainer, config.get(\"exp_manager\", None))\n", - "\n", - "# the exp_dir provides a path to the current experiment for easy access\n", - "exp_dir = str(exp_dir)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "D4jy28fbjekD" - }, - "source": [ - "# Using an Out-Of-The-Box Model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "Ins2ZzJckKKo" - }, - "outputs": [], - "source": [ - "# list available pretrained models\n", - "nemo_nlp.models.QAModel.list_available_models()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "iFnzHvkVk-S5" - }, - "outputs": [], - "source": [ - "# load the pretrained model\n", - "pretrained_model_name=\"qa_squadv1.1_bertbase\"\n", - "model = nemo_nlp.models.QAModel.from_pretrained(model_name=pretrained_model_name)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "6FI_nQsJo_11" - }, - "source": [ - "# Model Training" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "8tjLhUvL_o7_" - }, - "source": [ - "Before initializing the model, we might want to modify some of the model configs."
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "Xeuc2i7Y_nP5" - }, - "outputs": [], - "source": [ - "# complete list of supported BERT-like models\n", - "nemo_nlp.modules.get_pretrained_lm_models_list()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "RK2xglXyAUOO" - }, - "outputs": [], - "source": [ - "# add the model parameters specified above to the config\n", - "config.model.language_model.pretrained_model_name = PRETRAINED_BERT_MODEL\n", - "config.model.train_ds.batch_size = BATCH_SIZE\n", - "config.model.validation_ds.batch_size = BATCH_SIZE\n", - "config.model.test_ds.batch_size = BATCH_SIZE\n", - "config.model.optim.lr = LEARNING_RATE\n", - "\n", - "print(\"Updated model config - \\n\")\n", - "print(OmegaConf.to_yaml(config.model))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "NgsGLydWo-6-" - }, - "outputs": [], - "source": [ - "# initialize the model\n", - "# the dataset will be prepared for training and evaluation during initialization\n", - "model = nemo_nlp.models.QAModel(cfg=config.model, trainer=trainer)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "kQ592Tx4pzyB" - }, - "source": [ - "## Monitoring Training Progress\n", - "Optionally, you can create a TensorBoard visualization to monitor training progress." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "mTJr16_pp0aS" - }, - "outputs": [], - "source": [ - "try:\n", - " from google import colab\n", - " COLAB_ENV = True\n", - "except (ImportError, ModuleNotFoundError):\n", - " COLAB_ENV = False\n", - "\n", - "# Load the TensorBoard notebook extension\n", - "if COLAB_ENV:\n", - " %load_ext tensorboard\n", - " %tensorboard --logdir {exp_dir}\n", - "else:\n", - " print(\"To use tensorboard, please use this notebook in a Google Colab environment.\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "hUvnSpyjp0Dh" - }, - "outputs": [], - "source": [ - "# start the training\n", - "trainer.fit(model)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "JxBiIKMlH8yv" - }, - "source": [ - "After training for 1 epoch, exact match on the evaluation data should be around 59.2%, F1 around 70.2%." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ynCLBmAWFVsM" - }, - "source": [ - "# Evaluation\n", - "\n", - "To see how the model performs, let’s run evaluation on the test dataset." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "XBMCoXAKFtSd" - }, - "outputs": [], - "source": [ - "model.setup_test_data(test_data_config=config.model.test_ds)\n", - "trainer.test(model)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "VPdzJVAgSFaJ" - }, - "source": [ - "# Inference\n", - "\n", - "To use the model for creating predictions, let’s run inference on the unlabeled inference dataset."
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "DQhsamclRtxJ" - }, - "outputs": [], - "source": [ - "# store test predictions under the experiment output folder\n", - "output_prediction_file = f\"{exp_dir}/{INFER_PREDICTION_OUTPUT_FILE}\"\n", - "output_nbest_file = f\"{exp_dir}/{INFER_NBEST_OUTPUT_FILE}\"\n", - "all_preds, all_nbests = model.inference(file=INFER_FILE, batch_size=5, num_samples=INFER_NUM_SAMPLES, output_nbest_file=output_nbest_file, output_prediction_file=output_prediction_file)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "sQpRIOaM_hCQ" - }, - "outputs": [], - "source": [ - "for _, item in all_preds.items():\n", - " print(f\"question: {item[0]} answer: {item[1]}\")\n", - "# The prediction file contains the predicted answer to each question id for the first INFER_NUM_SAMPLES examples.\n", - "! python -m json.tool $exp_dir/$INFER_PREDICTION_OUTPUT_FILE" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ref1qSonGNhP" - }, - "source": [ - "If you have NeMo installed locally, you can also train the model with \n", - "[NeMo/examples/nlp/question_answering/question_answering_squad.py](https://github.com/NVIDIA/NeMo/blob/stable/examples/nlp/question_answering/question_answering_squad.py).\n", - "\n", - "To run the training script, use:\n", - "\n", - "`python question_answering_squad.py model.train_ds.file=TRAIN_FILE model.validation_ds.file=VAL_FILE model.test_ds.file=TEST_FILE`\n", - "\n", - "To improve the performance of the model, train with multiple GPUs and a global batch size of 24. For example, if you use 8 GPUs with `trainer.devices=8`, set `model.train_ds.batch_size=3`." - ] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "collapsed_sections": [ - "daYw_Xll2ZR9" - ], - "name": "Question_Answering_Squad.ipynb", - "private_outputs": true, - "provenance": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.6" - }, - "pycharm": { - "stem_cell": { - "cell_type": "raw", - "metadata": { - "collapsed": false - }, - "source": [] - } - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/tutorials/nlp/Token_Classification-BioMegatron.ipynb b/tutorials/nlp/Token_Classification-BioMegatron.ipynb index 304befe44a14..b07dfb061625 100644 --- a/tutorials/nlp/Token_Classification-BioMegatron.ipynb +++ b/tutorials/nlp/Token_Classification-BioMegatron.ipynb @@ -7,7 +7,7 @@ "metadata": {}, "outputs": [], "source": [ - "BRANCH='r1.13.0'" + "BRANCH='main'" ] }, { diff --git a/tutorials/nlp/Zero_Shot_Intent_Recognition.ipynb b/tutorials/nlp/Zero_Shot_Intent_Recognition.ipynb index dd9278666a28..69df7b27b02d 100644 --- a/tutorials/nlp/Zero_Shot_Intent_Recognition.ipynb +++ b/tutorials/nlp/Zero_Shot_Intent_Recognition.ipynb @@ -671,4 +671,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} +} \ No newline at end of file diff --git a/tutorials/text_processing/ITN_with_Thutmose_Tagger.ipynb b/tutorials/text_processing/ITN_with_Thutmose_Tagger.ipynb index 6204bf2516bb..b72cee51003b 100644 --- a/tutorials/text_processing/ITN_with_Thutmose_Tagger.ipynb +++ b/tutorials/text_processing/ITN_with_Thutmose_Tagger.ipynb @@ -1137,4 +1137,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} +} \ No newline at end of file diff
--git a/tutorials/text_processing/Text_(Inverse)_Normalization.ipynb b/tutorials/text_processing/Text_(Inverse)_Normalization.ipynb index 596523b41c0a..bbf4f2decc6b 100755 --- a/tutorials/text_processing/Text_(Inverse)_Normalization.ipynb +++ b/tutorials/text_processing/Text_(Inverse)_Normalization.ipynb @@ -1,468 +1,468 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "a5fA5qAm5Afg" - }, - "outputs": [], - "source": [ - "\"\"\"\n", - "You can run either this notebook locally or on Google Colab.\n", - "\n", - "Instructions for setting up Colab are as follows:\n", - "1. Open a new Python 3 notebook.\n", - "2. Import this notebook from GitHub (File -> Upload Notebook -> \"GITHUB\" tab -> copy/paste GitHub URL)\n", - "3. Optional: Restart the runtime (Runtime -> Restart Runtime) for any upgraded packages to take effect\n", - "\"\"\"" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "> **_NOTE:_** Find the official NeMo documentation at \n", - "https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/nlp/text_normalization/wfst/intro.html " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Overview\n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "F-IrnmXMTevr" - }, - "source": [ - "A sentence can be split up into semiotic tokens stemming from a variety of classes, where the spoken form differs from the written form. Examples are *dates*, *decimals*, *cardinals*, *measures* etc. The good TN or ITN system will be able to handle a variety of **semiotic classes**." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "-IT1Xr9iW2Xr" - }, - "source": [ - "# How to use\n", - "## 1. Installation" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "## Install NeMo, which installs both nemo and nemo_text_processing package\n", - "BRANCH = 'main'\n", - "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]\n", - "\n", - "# install Pynini for text normalization\n", - "! wget https://raw.githubusercontent.com/NVIDIA/NeMo/main/nemo_text_processing/install_pynini.sh\n", - "! bash install_pynini.sh" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# try to import of nemo_text_processing an other dependencies\n", - "import nemo_text_processing\n", - "import os" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 2. Text Normalization" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "Bfs7fa9lXDDh" - }, - "outputs": [], - "source": [ - "# create text normalization instance that works on cased input\n", - "from nemo_text_processing.text_normalization.normalize import Normalizer\n", - "normalizer = Normalizer(input_case='cased', lang='en')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# the normalizer class offers the following parameterization. 
\n", - "print(normalizer.__doc__)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "> **_NOTE:_** Standard Text Normalization uses `deterministic=True`, outputting a single output for a given input string\n", - "\n", - "\n", - "\n", - "### 2.1 Run TN on input string" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Normalizer.normalize() offers the following parameterization\n", - "print(normalizer.normalize.__doc__)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# run normalization on example string input\n", - "written = \"We paid $123 for this desk.\"\n", - "normalized = normalizer.normalize(written, verbose=True, punct_post_process=True)\n", - "print(normalized)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "intermediate semiotic class information is shown if verbose=True. \n", - "\n", - "Long input text could be split into sentences as follows:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "written = \"Mr. Smith paid $111 in U.S.A. on Dec. 17th. We paid $123 for this desk.\"\n", - "\n", - "# split long text into sentences\n", - "sentences = normalizer.split_text_into_sentences(written)\n", - "\n", - "for sent in sentences:\n", - " print(sent)\n", - "\n", - "# normalize each sentence separately using normalize() or all sentences at once with normalize_list()\n", - "normalizer.normalize_list(sentences)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "### 2.2 Run TN on list of input strings" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "UD-OuFmEOX3T" - }, - "outputs": [], - "source": [ - "# create temporary data folder and example input file\n", - "DATA_DIR = 'tmp_data_dir'\n", - "os.makedirs(DATA_DIR, exist_ok=True)\n", - "INPUT_FILE = f'{DATA_DIR}/inference.txt'\n", - "! echo -e 'The alarm went off at 10:00a.m. \\nI received $123' > $INPUT_FILE" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "d4T0gXHwY3JZ" - }, - "outputs": [], - "source": [ - "# check input file was properly created\n", - "! 
cat $INPUT_FILE" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# load input file into 'data' - a list of strings\n", - "data = []\n", - "with open(INPUT_FILE, 'r') as fp:\n", - " for line in fp:\n", - " data.append(line.strip())\n", - "data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "F5wSJTI8ZFRg" - }, - "outputs": [], - "source": [ - "# run normalization on 'data'\n", - "normalizer.normalize_list(data, punct_post_process=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "RMT5lkPYzZHK" - }, - "source": [ - "### 2.3 Evaluate TN on written-normalized text pairs \n", - "\n", - "The evaluation data needs to have the following format:\n", - "\n", - "'on 22 july 2022 they worked until 12:00' and the normalization is represented as " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# example evaluation sentence\n", - "eval_text = \"\"\"PLAIN\\ton\\t\n", - "DATE\\t22 july 2012\\tthe twenty second of july twenty twelve\n", - "PLAIN\\tthey\\t\n", - "PLAIN\\tworked\\t\n", - "PLAIN\\tuntil\\t\n", - "TIME\\t12:00\\ttwelve o'clock\n", - "\\t\n", - "\"\"\"\n", - "EVAL_FILE = f'{DATA_DIR}/eval.txt'\n", - "with open(EVAL_FILE, 'w') as fp:\n", - " fp.write(eval_text)\n", - "! cat $EVAL_FILE" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "RMT5lkPYzZHK" - }, - "source": [ - "That is, every sentence is broken into semiotic tokens line by line and concluded by end of sentence token ``. In case of a plain token it's `[SEMIOTIC CLASS] [TAB] [WRITTEN] [TAB] `, otherwise `[SEMIOTIC CLASS] [TAB] [WRITTEN] [TAB] [NORMALIZED]`.\n", - "This format was introduced in [Google Text normalization dataset](https://arxiv.org/abs/1611.00068). " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Parse evaluation file into written and normalized sentence pairs\n", - "from nemo_text_processing.text_normalization.data_loader_utils import load_files, training_data_to_sentences\n", - "eval_data = load_files([EVAL_FILE])\n", - "sentences_un_normalized, sentences_normalized, sentences_class_types = training_data_to_sentences(eval_data)\n", - "print(list(zip(sentences_un_normalized, sentences_normalized)))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# run prediction\n", - "sentences_prediction = normalizer.normalize_list(sentences_un_normalized)\n", - "print(sentences_prediction)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# measure sentence accuracy\n", - "from nemo_text_processing.text_normalization.data_loader_utils import evaluate\n", - "sentences_accuracy = evaluate(\n", - " preds=sentences_prediction, labels=sentences_normalized, input=sentences_un_normalized\n", - " )\n", - "print(\"- Accuracy: \" + str(sentences_accuracy))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 3. Inverse Text Normalization\n", - "ITN supports equivalent API as TN. 
Here we are only going to show inverse normalization on input string" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# create inverse text normalization instance\n", - "from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer\n", - "inverse_normalizer = InverseNormalizer(lang='en')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# run ITN on example string input\n", - "spoken = \"we paid one hundred twenty three dollars for this desk\"\n", - "un_normalized = inverse_normalizer.inverse_normalize(spoken, verbose=True)\n", - "print(un_normalized)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 4. Audio-based Text Normalization\n", - "Audio-based text normalization uses extended [WFST](https://en.wikipedia.org/wiki/Finite-state_machine) grammars to provide a range of possible normalization options.\n", - "The following example shows the workflow: (Disclaimer: exact values in graphic do not need to be real system's behavior)\n", - "1. text \"627\" is sent to extended TN WFST grammar\n", - "2. grammar output 5 different options of verbalization based on text input alone\n", - "3. in case an audio file is presented we compare the audio transcript with the verbalization options to find out which normalization is correct based on character error rate. The transcript is generated using a pretrained NeMo ASR model. \n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The following shows an example of how to generate multiple normalization options:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# import non-deterministic WFST-based TN module\n", - "from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# initialize normalizer, this may take some time to generate the extended grammars. \n", - "# Thus, we recommend to cache the grammars by specifying a cache directory\n", - "normalizer = NormalizerWithAudio(\n", - " lang=\"en\",\n", - " input_case=\"cased\",\n", - " overwrite_cache=False,\n", - " cache_dir=\"cache_dir\",\n", - " )\n", - "# create up to 10 normalization options\n", - "print(normalizer.normalize(\"123\", n_tagged=10, punct_post_process=True))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 5. Parallel execution\n", - "\n", - "`Normalizer.normalize()` as well as `InverseNormalizer.inverse_normalize()` are functions without side effect.\n", - "Thus, if you need to normalize large amounts of input examples, these can be executed in parallel." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ENMDNl9C4TkF" - }, - "source": [ - "# Tutorial on how to customize grammars\n", - "\n", - "https://colab.research.google.com/github/NVIDIA/NeMo/blob/stable/tutorials/text_processing/WFST_Tutorial.ipynb\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "lcvT3P2lQ_GS" - }, - "source": [ - "# References and Further Reading:\n", - "\n", - "\n", - "- [Zhang, Yang, Bakhturina, Evelina, Gorman, Kyle and Ginsburg, Boris. 
\"NeMo Inverse Text Normalization: From Development To Production.\" (2021)](https://arxiv.org/abs/2104.05055)\n", - "- [Ebden, Peter, and Richard Sproat. \"The Kestrel TTS text normalization system.\" Natural Language Engineering 21.3 (2015): 333.](https://www.cambridge.org/core/journals/natural-language-engineering/article/abs/kestrel-tts-text-normalization-system/F0C18A3F596B75D83B75C479E23795DA)\n", - "- [Gorman, Kyle. \"Pynini: A Python library for weighted finite-state grammar compilation.\" Proceedings of the SIGFSM Workshop on Statistical NLP and Weighted Automata. 2016.](https://www.aclweb.org/anthology/W16-2409.pdf)\n", - "- [Mohri, Mehryar, Fernando Pereira, and Michael Riley. \"Weighted finite-state transducers in speech recognition.\" Computer Speech & Language 16.1 (2002): 69-88.](https://cs.nyu.edu/~mohri/postscript/csl01.pdf)" - ] - } - ], - "metadata": { - "accelerator": "GPU", - "colab": { - "collapsed_sections": [ - "lcvT3P2lQ_GS" - ], - "name": "Text_Normalization_Tutorial.ipynb", - "private_outputs": true, - "provenance": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.7" - } - }, - "nbformat": 4, - "nbformat_minor": 1 -} + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "a5fA5qAm5Afg" + }, + "outputs": [], + "source": [ + "\"\"\"\n", + "You can run either this notebook locally or on Google Colab.\n", + "\n", + "Instructions for setting up Colab are as follows:\n", + "1. Open a new Python 3 notebook.\n", + "2. Import this notebook from GitHub (File -> Upload Notebook -> \"GITHUB\" tab -> copy/paste GitHub URL)\n", + "3. Optional: Restart the runtime (Runtime -> Restart Runtime) for any upgraded packages to take effect\n", + "\"\"\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "> **_NOTE:_** Find the official NeMo documentation at \n", + "https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/nlp/text_normalization/wfst/intro.html " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Overview\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "F-IrnmXMTevr" + }, + "source": [ + "A sentence can be split up into semiotic tokens stemming from a variety of classes, where the spoken form differs from the written form. Examples are *dates*, *decimals*, *cardinals*, *measures* etc. The good TN or ITN system will be able to handle a variety of **semiotic classes**." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-IT1Xr9iW2Xr" + }, + "source": [ + "# How to use\n", + "## 1. Installation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "## Install NeMo, which installs both nemo and nemo_text_processing package\n", + "BRANCH = 'main'\n", + "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]\n", + "\n", + "# install Pynini for text normalization\n", + "! wget https://raw.githubusercontent.com/NVIDIA/NeMo/main/nemo_text_processing/install_pynini.sh\n", + "! 
bash install_pynini.sh" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# try to import of nemo_text_processing an other dependencies\n", + "import nemo_text_processing\n", + "import os" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2. Text Normalization" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Bfs7fa9lXDDh" + }, + "outputs": [], + "source": [ + "# create text normalization instance that works on cased input\n", + "from nemo_text_processing.text_normalization.normalize import Normalizer\n", + "normalizer = Normalizer(input_case='cased', lang='en')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# the normalizer class offers the following parameterization. \n", + "print(normalizer.__doc__)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "> **_NOTE:_** Standard Text Normalization uses `deterministic=True`, outputting a single output for a given input string\n", + "\n", + "\n", + "\n", + "### 2.1 Run TN on input string" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Normalizer.normalize() offers the following parameterization\n", + "print(normalizer.normalize.__doc__)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# run normalization on example string input\n", + "written = \"We paid $123 for this desk.\"\n", + "normalized = normalizer.normalize(written, verbose=True, punct_post_process=True)\n", + "print(normalized)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "intermediate semiotic class information is shown if verbose=True. \n", + "\n", + "Long input text could be split into sentences as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "written = \"Mr. Smith paid $111 in U.S.A. on Dec. 17th. We paid $123 for this desk.\"\n", + "\n", + "# split long text into sentences\n", + "sentences = normalizer.split_text_into_sentences(written)\n", + "\n", + "for sent in sentences:\n", + " print(sent)\n", + "\n", + "# normalize each sentence separately using normalize() or all sentences at once with normalize_list()\n", + "normalizer.normalize_list(sentences)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "### 2.2 Run TN on list of input strings" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "UD-OuFmEOX3T" + }, + "outputs": [], + "source": [ + "# create temporary data folder and example input file\n", + "DATA_DIR = 'tmp_data_dir'\n", + "os.makedirs(DATA_DIR, exist_ok=True)\n", + "INPUT_FILE = f'{DATA_DIR}/inference.txt'\n", + "! echo -e 'The alarm went off at 10:00a.m. \\nI received $123' > $INPUT_FILE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "d4T0gXHwY3JZ" + }, + "outputs": [], + "source": [ + "# check input file was properly created\n", + "! 
cat $INPUT_FILE" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# load input file into 'data' - a list of strings\n", + "data = []\n", + "with open(INPUT_FILE, 'r') as fp:\n", + " for line in fp:\n", + " data.append(line.strip())\n", + "data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "F5wSJTI8ZFRg" + }, + "outputs": [], + "source": [ + "# run normalization on 'data'\n", + "normalizer.normalize_list(data, punct_post_process=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "RMT5lkPYzZHK" + }, + "source": [ + "### 2.3 Evaluate TN on written-normalized text pairs \n", + "\n", + "The evaluation data needs to have the following format:\n", + "\n", + "'on 22 july 2022 they worked until 12:00' and the normalization is represented as " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# example evaluation sentence\n", + "eval_text = \"\"\"PLAIN\\ton\\t\n", + "DATE\\t22 july 2012\\tthe twenty second of july twenty twelve\n", + "PLAIN\\tthey\\t\n", + "PLAIN\\tworked\\t\n", + "PLAIN\\tuntil\\t\n", + "TIME\\t12:00\\ttwelve o'clock\n", + "\\t\n", + "\"\"\"\n", + "EVAL_FILE = f'{DATA_DIR}/eval.txt'\n", + "with open(EVAL_FILE, 'w') as fp:\n", + " fp.write(eval_text)\n", + "! cat $EVAL_FILE" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "RMT5lkPYzZHK" + }, + "source": [ + "That is, every sentence is broken into semiotic tokens line by line and concluded by end of sentence token ``. In case of a plain token it's `[SEMIOTIC CLASS] [TAB] [WRITTEN] [TAB] `, otherwise `[SEMIOTIC CLASS] [TAB] [WRITTEN] [TAB] [NORMALIZED]`.\n", + "This format was introduced in [Google Text normalization dataset](https://arxiv.org/abs/1611.00068). " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Parse evaluation file into written and normalized sentence pairs\n", + "from nemo_text_processing.text_normalization.data_loader_utils import load_files, training_data_to_sentences\n", + "eval_data = load_files([EVAL_FILE])\n", + "sentences_un_normalized, sentences_normalized, sentences_class_types = training_data_to_sentences(eval_data)\n", + "print(list(zip(sentences_un_normalized, sentences_normalized)))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# run prediction\n", + "sentences_prediction = normalizer.normalize_list(sentences_un_normalized)\n", + "print(sentences_prediction)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# measure sentence accuracy\n", + "from nemo_text_processing.text_normalization.data_loader_utils import evaluate\n", + "sentences_accuracy = evaluate(\n", + " preds=sentences_prediction, labels=sentences_normalized, input=sentences_un_normalized\n", + " )\n", + "print(\"- Accuracy: \" + str(sentences_accuracy))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3. Inverse Text Normalization\n", + "ITN supports equivalent API as TN. 
Here we only show inverse normalization on an input string." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# create inverse text normalization instance\n", + "from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer\n", + "inverse_normalizer = InverseNormalizer(lang='en')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# run ITN on example string input\n", + "spoken = \"we paid one hundred twenty three dollars for this desk\"\n", + "un_normalized = inverse_normalizer.inverse_normalize(spoken, verbose=True)\n", + "print(un_normalized)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4. Audio-based Text Normalization\n", + "Audio-based text normalization uses extended [WFST](https://en.wikipedia.org/wiki/Finite-state_machine) grammars to provide a range of possible normalization options.\n", + "The following example shows the workflow (disclaimer: exact values in the graphic do not necessarily reflect the real system's behavior):\n", + "1. the text \"627\" is sent to the extended TN WFST grammar\n", + "2. the grammar outputs 5 different verbalization options based on the text input alone\n", + "3. if an audio file is provided, we compare the audio transcript with the verbalization options to find out which normalization is correct, based on character error rate. The transcript is generated using a pretrained NeMo ASR model. \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The following shows an example of how to generate multiple normalization options:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# import non-deterministic WFST-based TN module\n", + "from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# initialize the normalizer; this may take some time to generate the extended grammars,\n", + "# so we recommend caching the grammars by specifying a cache directory\n", + "normalizer = NormalizerWithAudio(\n", + " lang=\"en\",\n", + " input_case=\"cased\",\n", + " overwrite_cache=False,\n", + " cache_dir=\"cache_dir\",\n", + " )\n", + "# create up to 10 normalization options\n", + "print(normalizer.normalize(\"123\", n_tagged=10, punct_post_process=True))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 5. Parallel execution\n", + "\n", + "`Normalizer.normalize()` as well as `InverseNormalizer.inverse_normalize()` are functions without side effects.\n", + "Thus, if you need to normalize a large number of input examples, these calls can be executed in parallel." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ENMDNl9C4TkF" + }, + "source": [ + "# Tutorial on how to customize grammars\n", + "\n", + "https://colab.research.google.com/github/NVIDIA/NeMo/blob/stable/tutorials/text_processing/WFST_Tutorial.ipynb\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "lcvT3P2lQ_GS" + }, + "source": [ + "# References and Further Reading:\n", + "\n", + "\n", + "- [Zhang, Yang, Bakhturina, Evelina, Gorman, Kyle and Ginsburg, Boris.
\"NeMo Inverse Text Normalization: From Development To Production.\" (2021)](https://arxiv.org/abs/2104.05055)\n", + "- [Ebden, Peter, and Richard Sproat. \"The Kestrel TTS text normalization system.\" Natural Language Engineering 21.3 (2015): 333.](https://www.cambridge.org/core/journals/natural-language-engineering/article/abs/kestrel-tts-text-normalization-system/F0C18A3F596B75D83B75C479E23795DA)\n", + "- [Gorman, Kyle. \"Pynini: A Python library for weighted finite-state grammar compilation.\" Proceedings of the SIGFSM Workshop on Statistical NLP and Weighted Automata. 2016.](https://www.aclweb.org/anthology/W16-2409.pdf)\n", + "- [Mohri, Mehryar, Fernando Pereira, and Michael Riley. \"Weighted finite-state transducers in speech recognition.\" Computer Speech & Language 16.1 (2002): 69-88.](https://cs.nyu.edu/~mohri/postscript/csl01.pdf)" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [ + "lcvT3P2lQ_GS" + ], + "name": "Text_Normalization_Tutorial.ipynb", + "private_outputs": true, + "provenance": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.7" + } + }, + "nbformat": 4, + "nbformat_minor": 1 + } \ No newline at end of file From 8bfafea2f58caa20d20d657278927c2c74addbc7 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Wed, 14 Dec 2022 00:35:30 -0800 Subject: [PATCH 223/244] more fixes Signed-off-by: Evgeniy Shabalin --- tutorials/nlp/Text2Sparql.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tutorials/nlp/Text2Sparql.ipynb b/tutorials/nlp/Text2Sparql.ipynb index 0370831bf732..69ccdaccadc9 100644 --- a/tutorials/nlp/Text2Sparql.ipynb +++ b/tutorials/nlp/Text2Sparql.ipynb @@ -149,7 +149,7 @@ "WORK_DIR = \"PATH_TO_CHECKPOINTS_AND_LOGS\"\n", "\n", "# NeMo Version\n", - "BRANCH = 'r1.13.0'\n" + "BRANCH = 'main'\n" ] }, { @@ -2260,4 +2260,4 @@ }, "nbformat": 4, "nbformat_minor": 1 -} \ No newline at end of file +} From 269c444dbd03f76d42464116475d6418d3e7da25 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Wed, 14 Dec 2022 00:55:19 -0800 Subject: [PATCH 224/244] added experimental tag --- nemo/collections/tts/models/vits.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 0c20ae65ad48..2d2e6091cc93 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -34,6 +34,7 @@ from nemo.core.classes.common import PretrainedModelInfo from nemo.core.optim.lr_scheduler import CosineAnnealing from nemo.utils import logging, model_utils +from nemo.utils.decorators.experimental import experimental HAVE_WANDB = True try: From 70f1c9c24c759997c3ab1d5c2773f1fc88626029 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Thu, 15 Dec 2022 04:11:22 -0800 Subject: [PATCH 225/244] Clarification updates Signed-off-by: Evgeniy Shabalin --- nemo/collections/tts/helpers/helpers.py | 11 +++--- nemo/collections/tts/losses/vits_losses.py | 9 ++--- nemo/collections/tts/models/base.py | 2 +- nemo/collections/tts/modules/vits_modules.py | 36 ++++++++++---------- 4 files changed, 30 insertions(+), 28 deletions(-) diff --git a/nemo/collections/tts/helpers/helpers.py 
b/nemo/collections/tts/helpers/helpers.py index b5b0608033cd..147c3b352161 100644 --- a/nemo/collections/tts/helpers/helpers.py +++ b/nemo/collections/tts/helpers/helpers.py @@ -528,6 +528,9 @@ def split_view(tensor, split_size: int, dim: int = 0): def slice_segments(x, ids_str, segment_size=4): + """ + Slices segments from a batch + """ ret = torch.zeros_like(x[:, :, :segment_size]) for i in range(x.size(0)): idx_str = ids_str[i] @@ -541,6 +544,9 @@ def slice_segments(x, ids_str, segment_size=4): def rand_slice_segments(x, x_lengths=None, segment_size=4): + """ + Chooses random indices and slices segments from a batch + """ b, d, t = x.size() if x_lengths is None: x_lengths = t @@ -571,11 +577,6 @@ def clip_grad_value_(parameters, clip_value, norm_type=2): return total_norm -def intersperse(lst, item): - result = [item] * (len(lst) * 2 + 1) - result[1::2] = lst - return result - def convert_pad_shape(pad_shape): l = pad_shape[::-1] diff --git a/nemo/collections/tts/losses/vits_losses.py b/nemo/collections/tts/losses/vits_losses.py index 2cb54d33c119..b2945a2aa362 100644 --- a/nemo/collections/tts/losses/vits_losses.py +++ b/nemo/collections/tts/losses/vits_losses.py @@ -64,8 +64,10 @@ def output_types(self): @typecheck() def forward(self, z_p, logs_q, m_p, logs_p, z_mask): """ - z_p, logs_q: [b, h, t_t] - m_p, logs_p: [b, h, t_t] + z_p: Posterior sample, mapped by the flow + logs_q: Log variance of the posterior distribution + m_p: Mean of the prior distribution + logs_p: Log variance of the prior distribution """ z_p = z_p.float() logs_q = logs_q.float() @@ -124,8 +126,7 @@ def input_types(self): @property def output_types(self): return { - "real_loss": NeuralType(elements_type=LossType()), - # "gen_loss": NeuralType(elements_type=LossType()), + "loss": NeuralType(elements_type=LossType()), "real_losses": [NeuralType(elements_type=LossType())], "fake_losses": [NeuralType(elements_type=LossType())], } diff --git a/nemo/collections/tts/models/base.py b/nemo/collections/tts/models/base.py index 8aa38de62da4..37692be9b0a8 100644 --- a/nemo/collections/tts/models/base.py +++ b/nemo/collections/tts/models/base.py @@ -235,7 +235,7 @@ class TextToWaveform(ModelPT, ABC): @abstractmethod def parse(self, str_input: str, **kwargs) -> 'torch.tensor': """ - A helper function that accepts raw python strings and turns them into a tensor. The tensor should have 2 + A helper function that accepts a raw python string and turns it into a tensor. The tensor should have 2 dimensions. The first is the batch, which should be of size 1. The second should represent time. The tensor should represent either tokenized or embedded text, depending on the model.
""" diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index 99ef7c64c1f6..2ff5ae0f378d 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -846,14 +846,14 @@ def __init__( self.emb_g = nn.Embedding(n_speakers, gin_channels) def forward(self, text, text_len, spec, spec_len, sid=None): - x, mean_prior, logscale_prior, x_mask = self.enc_p(text, text_len) + x, mean_prior, logscale_prior, text_mask = self.enc_p(text, text_len) if self.n_speakers > 1: g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] else: g = None - z, mean_posterior, logscale_posterior, y_mask = self.enc_q(spec, spec_len, g=g) - z_p = self.flow(z, y_mask, g=g) + z, mean_posterior, logscale_posterior, spec_mask = self.enc_q(spec, spec_len, g=g) + z_p = self.flow(z, spec_mask, g=g) with torch.no_grad(): # negative cross-entropy @@ -868,17 +868,17 @@ def forward(self, text, text_len, spec, spec_len, sid=None): neg_cent4 = torch.sum(-0.5 * (mean_prior ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) + attn_mask = torch.unsqueeze(text_mask, 2) * torch.unsqueeze(y_mask, -1) attn = maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() w = attn.sum(2) if self.use_sdp: - l_length = self.dp(x, x_mask, w, g=g) - l_length = l_length / torch.sum(x_mask) + l_length = self.dp(x, text_mask, w, g=g) + l_length = l_length / torch.sum(text_mask) else: - logw_ = torch.log(w + 1e-6) * x_mask - logw = self.dp(x, x_mask, g=g) - l_length = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(x_mask) # for averaging + logw_ = torch.log(w + 1e-6) * text_mask + logw = self.dp(x, text_mask, g=g) + l_length = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(text_mask) # for averaging # expand prior mean_prior = torch.matmul(attn.squeeze(1), mean_prior.transpose(1, 2)).transpose( @@ -895,27 +895,27 @@ def forward(self, text, text_len, spec, spec_len, sid=None): l_length, attn, ids_slice, - x_mask, - y_mask, + text_mask, + spec_mask, (z, z_p, mean_prior, logscale_prior, mean_posterior, logscale_posterior), ) - def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1.0, max_len=None): - x, mean_prior, logscale_prior, x_mask = self.enc_p(x, x_lengths) + def infer(self, text, text_len, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1.0, max_len=None): + x, mean_prior, logscale_prior, text_mask = self.enc_p(text, text_len) if self.n_speakers > 1 and sid is not None: g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] else: g = None if self.use_sdp: - logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) + logw = self.dp(x, text_mask, g=g, reverse=True, noise_scale=noise_scale_w) else: - logw = self.dp(x, x_mask, g=g) - w = torch.exp(logw) * x_mask * length_scale + logw = self.dp(x, text_mask, g=g) + w = torch.exp(logw) * text_mask * length_scale w_ceil = torch.ceil(w) audio_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - audio_mask = torch.unsqueeze(get_mask_from_lengths(audio_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(audio_mask, -1) + audio_mask = torch.unsqueeze(get_mask_from_lengths(audio_lengths, None), 1).to(text_mask.dtype) + attn_mask = torch.unsqueeze(text_mask, 2) * torch.unsqueeze(audio_mask, -1) attn = generate_path(w_ceil, attn_mask) mean_prior = torch.matmul(attn.squeeze(1), 
mean_prior.transpose(1, 2)).transpose( From 65a1a6904f41aeb680ac61c6c2037cb03edaa0f2 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 15 Dec 2022 12:13:15 +0000 Subject: [PATCH 226/244] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- nemo/collections/tts/helpers/helpers.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nemo/collections/tts/helpers/helpers.py b/nemo/collections/tts/helpers/helpers.py index 147c3b352161..e6c7e7fc707a 100644 --- a/nemo/collections/tts/helpers/helpers.py +++ b/nemo/collections/tts/helpers/helpers.py @@ -577,7 +577,6 @@ def clip_grad_value_(parameters, clip_value, norm_type=2): return total_norm - def convert_pad_shape(pad_shape): l = pad_shape[::-1] pad_shape = [item for sublist in l for item in sublist] From 4dff876f3c9a4c99f892bdc20926f21cd05d40f6 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Thu, 15 Dec 2022 04:14:50 -0800 Subject: [PATCH 227/244] fix Signed-off-by: Evgeniy Shabalin --- nemo/collections/tts/helpers/helpers.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nemo/collections/tts/helpers/helpers.py b/nemo/collections/tts/helpers/helpers.py index 147c3b352161..e6c7e7fc707a 100644 --- a/nemo/collections/tts/helpers/helpers.py +++ b/nemo/collections/tts/helpers/helpers.py @@ -577,7 +577,6 @@ def clip_grad_value_(parameters, clip_value, norm_type=2): return total_norm - def convert_pad_shape(pad_shape): l = pad_shape[::-1] pad_shape = [item for sublist in l for item in sublist] From 37bbd2e60327df7845754438652fafa7836807ca Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Thu, 15 Dec 2022 04:25:59 -0800 Subject: [PATCH 228/244] remove old cython code Signed-off-by: Evgeniy Shabalin --- .../tts/modules/monotonic_align/__init__.py | 50 +------------------ nemo/collections/tts/modules/vits_modules.py | 2 +- 2 files changed, 2 insertions(+), 50 deletions(-) diff --git a/nemo/collections/tts/modules/monotonic_align/__init__.py b/nemo/collections/tts/modules/monotonic_align/__init__.py index 2f8f95d68eb3..e3b113ef9ef7 100644 --- a/nemo/collections/tts/modules/monotonic_align/__init__.py +++ b/nemo/collections/tts/modules/monotonic_align/__init__.py @@ -37,8 +37,7 @@ import numpy as np import torch -# from .numba_core import maximum_path_c - +from .numba_core import maximum_path_c def maximum_path(neg_cent, mask): """ Cython optimized version. 
@@ -54,50 +53,3 @@ def maximum_path(neg_cent, mask): t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32) maximum_path_c(path, neg_cent, t_t_max, t_s_max) return torch.from_numpy(path).to(device=device, dtype=dtype) - - -@numba.jit(nopython=True, boundscheck=False, parallel=True) -def maximum_path_each(path, value, t_y: int, t_x: int, max_neg_val=-1e9): - """ - Args: - path: int32[:, :] - value: float32[:, :] - t_y: int - t_x: int - max_neg_val: float - """ - index: int = t_x - 1 - - for y in range(t_y): - for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - if x == y: - v_cur = max_neg_val - else: - v_cur = value[y - 1, x] - if x == 0: - if y == 0: - v_prev = 0.0 - else: - v_prev = max_neg_val - else: - v_prev = value[y - 1, x - 1] - value[y, x] += max(v_prev, v_cur) - - for y in range(t_y - 1, -1, -1): - path[y, index] = 1 - if index != 0 and (index == y or value[y - 1, index] < value[y - 1, index - 1]): - index = index - 1 - - -@numba.jit(nopython=True, boundscheck=False, parallel=True) -def maximum_path_c(paths, values, t_ys, t_xs): - """ - Args: - paths: int32[:, :, :] - values: float32[:, :, :] - t_ys: int[:] - t_xs: int[:] - """ - b: int = paths.shape[0] - for i in numba.prange(b): - maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index 2ff5ae0f378d..1c1dc38ef210 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -868,7 +868,7 @@ def forward(self, text, text_len, spec, spec_len, sid=None): neg_cent4 = torch.sum(-0.5 * (mean_prior ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 - attn_mask = torch.unsqueeze(text_mask, 2) * torch.unsqueeze(y_mask, -1) + attn_mask = torch.unsqueeze(text_mask, 2) * torch.unsqueeze(spec_mask, -1) attn = maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() w = attn.sum(2) From 1cd4041fab93b40e2390adedff8941a28f6d9fc2 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Thu, 15 Dec 2022 04:27:18 -0800 Subject: [PATCH 229/244] remove old cython code Signed-off-by: Evgeniy Shabalin --- .../tts/modules/monotonic_align/__init__.py | 1 - .../tts/modules/monotonic_align/core.c | 21336 ---------------- .../tts/modules/monotonic_align/core.pyx | 42 - .../tts/modules/monotonic_align/setup.py | 45 - 4 files changed, 21424 deletions(-) delete mode 100644 nemo/collections/tts/modules/monotonic_align/core.c delete mode 100644 nemo/collections/tts/modules/monotonic_align/core.pyx delete mode 100644 nemo/collections/tts/modules/monotonic_align/setup.py diff --git a/nemo/collections/tts/modules/monotonic_align/__init__.py b/nemo/collections/tts/modules/monotonic_align/__init__.py index e3b113ef9ef7..65bda2633e99 100644 --- a/nemo/collections/tts/modules/monotonic_align/__init__.py +++ b/nemo/collections/tts/modules/monotonic_align/__init__.py @@ -33,7 +33,6 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. -import numba import numpy as np import torch diff --git a/nemo/collections/tts/modules/monotonic_align/core.c b/nemo/collections/tts/modules/monotonic_align/core.c deleted file mode 100644 index 2e21659560ca..000000000000 --- a/nemo/collections/tts/modules/monotonic_align/core.c +++ /dev/null @@ -1,21336 +0,0 @@ -// Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// MIT License -// -// Copyright (c) 2021 Jaehyeon Kim -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - - -/* Generated by Cython 0.29.21 */ - -/* BEGIN: Cython Metadata -{ - "distutils": { - "name": "monotonic_align.core", - "sources": [ - "core.pyx" - ] - }, - "module_name": "monotonic_align.core" -} -END: Cython Metadata */ - -#define PY_SSIZE_T_CLEAN -#include "Python.h" -#ifndef Py_PYTHON_H - #error Python headers needed to compile C extensions, please install development version of Python. -#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) - #error Cython requires Python 2.6+ or Python 3.3+. 
-#else
-#define CYTHON_ABI "0_29_21"
-#define CYTHON_HEX_VERSION 0x001D15F0
-#define CYTHON_FUTURE_DIVISION 0
-#include <stddef.h>
-#ifndef offsetof
- #define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
-#endif
-#if !defined(WIN32) && !defined(MS_WINDOWS)
- #ifndef __stdcall
- #define __stdcall
- #endif
- #ifndef __cdecl
- #define __cdecl
- #endif
- #ifndef __fastcall
- #define __fastcall
- #endif
-#endif
-#ifndef DL_IMPORT
- #define DL_IMPORT(t) t
-#endif
-#ifndef DL_EXPORT
- #define DL_EXPORT(t) t
-#endif
-#define __PYX_COMMA ,
-#ifndef HAVE_LONG_LONG
- #if PY_VERSION_HEX >= 0x02070000
- #define HAVE_LONG_LONG
- #endif
-#endif
-#ifndef PY_LONG_LONG
- #define PY_LONG_LONG LONG_LONG
-#endif
-#ifndef Py_HUGE_VAL
- #define Py_HUGE_VAL HUGE_VAL
-#endif
-#ifdef PYPY_VERSION
- #define CYTHON_COMPILING_IN_PYPY 1
- #define CYTHON_COMPILING_IN_PYSTON 0
- #define CYTHON_COMPILING_IN_CPYTHON 0
- #undef CYTHON_USE_TYPE_SLOTS
- #define CYTHON_USE_TYPE_SLOTS 0
- #undef CYTHON_USE_PYTYPE_LOOKUP
- #define CYTHON_USE_PYTYPE_LOOKUP 0
- #if PY_VERSION_HEX < 0x03050000
- #undef CYTHON_USE_ASYNC_SLOTS
- #define CYTHON_USE_ASYNC_SLOTS 0
- #elif !defined(CYTHON_USE_ASYNC_SLOTS)
- #define CYTHON_USE_ASYNC_SLOTS 1
- #endif
- #undef CYTHON_USE_PYLIST_INTERNALS
- #define CYTHON_USE_PYLIST_INTERNALS 0
- #undef CYTHON_USE_UNICODE_INTERNALS
- #define CYTHON_USE_UNICODE_INTERNALS 0
- #undef CYTHON_USE_UNICODE_WRITER
- #define CYTHON_USE_UNICODE_WRITER 0
- #undef CYTHON_USE_PYLONG_INTERNALS
- #define CYTHON_USE_PYLONG_INTERNALS 0
- #undef CYTHON_AVOID_BORROWED_REFS
- #define CYTHON_AVOID_BORROWED_REFS 1
- #undef CYTHON_ASSUME_SAFE_MACROS
- #define CYTHON_ASSUME_SAFE_MACROS 0
- #undef CYTHON_UNPACK_METHODS
- #define CYTHON_UNPACK_METHODS 0
- #undef CYTHON_FAST_THREAD_STATE
- #define CYTHON_FAST_THREAD_STATE 0
- #undef CYTHON_FAST_PYCALL
- #define CYTHON_FAST_PYCALL 0
- #undef CYTHON_PEP489_MULTI_PHASE_INIT
- #define CYTHON_PEP489_MULTI_PHASE_INIT 0
- #undef CYTHON_USE_TP_FINALIZE
- #define CYTHON_USE_TP_FINALIZE 0
- #undef CYTHON_USE_DICT_VERSIONS
- #define CYTHON_USE_DICT_VERSIONS 0
- #undef CYTHON_USE_EXC_INFO_STACK
- #define CYTHON_USE_EXC_INFO_STACK 0
-#elif defined(PYSTON_VERSION)
- #define CYTHON_COMPILING_IN_PYPY 0
- #define CYTHON_COMPILING_IN_PYSTON 1
- #define CYTHON_COMPILING_IN_CPYTHON 0
- #ifndef CYTHON_USE_TYPE_SLOTS
- #define CYTHON_USE_TYPE_SLOTS 1
- #endif
- #undef CYTHON_USE_PYTYPE_LOOKUP
- #define CYTHON_USE_PYTYPE_LOOKUP 0
- #undef CYTHON_USE_ASYNC_SLOTS
- #define CYTHON_USE_ASYNC_SLOTS 0
- #undef CYTHON_USE_PYLIST_INTERNALS
- #define CYTHON_USE_PYLIST_INTERNALS 0
- #ifndef CYTHON_USE_UNICODE_INTERNALS
- #define CYTHON_USE_UNICODE_INTERNALS 1
- #endif
- #undef CYTHON_USE_UNICODE_WRITER
- #define CYTHON_USE_UNICODE_WRITER 0
- #undef CYTHON_USE_PYLONG_INTERNALS
- #define CYTHON_USE_PYLONG_INTERNALS 0
- #ifndef CYTHON_AVOID_BORROWED_REFS
- #define CYTHON_AVOID_BORROWED_REFS 0
- #endif
- #ifndef CYTHON_ASSUME_SAFE_MACROS
- #define CYTHON_ASSUME_SAFE_MACROS 1
- #endif
- #ifndef CYTHON_UNPACK_METHODS
- #define CYTHON_UNPACK_METHODS 1
- #endif
- #undef CYTHON_FAST_THREAD_STATE
- #define CYTHON_FAST_THREAD_STATE 0
- #undef CYTHON_FAST_PYCALL
- #define CYTHON_FAST_PYCALL 0
- #undef CYTHON_PEP489_MULTI_PHASE_INIT
- #define CYTHON_PEP489_MULTI_PHASE_INIT 0
- #undef CYTHON_USE_TP_FINALIZE
- #define CYTHON_USE_TP_FINALIZE 0
- #undef CYTHON_USE_DICT_VERSIONS
- #define CYTHON_USE_DICT_VERSIONS 0
- #undef CYTHON_USE_EXC_INFO_STACK
- #define CYTHON_USE_EXC_INFO_STACK 0
-#else
- #define CYTHON_COMPILING_IN_PYPY 0
- #define CYTHON_COMPILING_IN_PYSTON 0
- #define CYTHON_COMPILING_IN_CPYTHON 1
- #ifndef CYTHON_USE_TYPE_SLOTS
- #define CYTHON_USE_TYPE_SLOTS 1
- #endif
- #if PY_VERSION_HEX < 0x02070000
- #undef CYTHON_USE_PYTYPE_LOOKUP
- #define CYTHON_USE_PYTYPE_LOOKUP 0
- #elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
- #define CYTHON_USE_PYTYPE_LOOKUP 1
- #endif
- #if PY_MAJOR_VERSION < 3
- #undef CYTHON_USE_ASYNC_SLOTS
- #define CYTHON_USE_ASYNC_SLOTS 0
- #elif !defined(CYTHON_USE_ASYNC_SLOTS)
- #define CYTHON_USE_ASYNC_SLOTS 1
- #endif
- #if PY_VERSION_HEX < 0x02070000
- #undef CYTHON_USE_PYLONG_INTERNALS
- #define CYTHON_USE_PYLONG_INTERNALS 0
- #elif !defined(CYTHON_USE_PYLONG_INTERNALS)
- #define CYTHON_USE_PYLONG_INTERNALS 1
- #endif
- #ifndef CYTHON_USE_PYLIST_INTERNALS
- #define CYTHON_USE_PYLIST_INTERNALS 1
- #endif
- #ifndef CYTHON_USE_UNICODE_INTERNALS
- #define CYTHON_USE_UNICODE_INTERNALS 1
- #endif
- #if PY_VERSION_HEX < 0x030300F0
- #undef CYTHON_USE_UNICODE_WRITER
- #define CYTHON_USE_UNICODE_WRITER 0
- #elif !defined(CYTHON_USE_UNICODE_WRITER)
- #define CYTHON_USE_UNICODE_WRITER 1
- #endif
- #ifndef CYTHON_AVOID_BORROWED_REFS
- #define CYTHON_AVOID_BORROWED_REFS 0
- #endif
- #ifndef CYTHON_ASSUME_SAFE_MACROS
- #define CYTHON_ASSUME_SAFE_MACROS 1
- #endif
- #ifndef CYTHON_UNPACK_METHODS
- #define CYTHON_UNPACK_METHODS 1
- #endif
- #ifndef CYTHON_FAST_THREAD_STATE
- #define CYTHON_FAST_THREAD_STATE 1
- #endif
- #ifndef CYTHON_FAST_PYCALL
- #define CYTHON_FAST_PYCALL 1
- #endif
- #ifndef CYTHON_PEP489_MULTI_PHASE_INIT
- #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
- #endif
- #ifndef CYTHON_USE_TP_FINALIZE
- #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
- #endif
- #ifndef CYTHON_USE_DICT_VERSIONS
- #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
- #endif
- #ifndef CYTHON_USE_EXC_INFO_STACK
- #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
- #endif
-#endif
-#if !defined(CYTHON_FAST_PYCCALL)
-#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
-#endif
-#if CYTHON_USE_PYLONG_INTERNALS
- #include "longintrepr.h"
- #undef SHIFT
- #undef BASE
- #undef MASK
- #ifdef SIZEOF_VOID_P
- enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
- #endif
-#endif
-#ifndef __has_attribute
- #define __has_attribute(x) 0
-#endif
-#ifndef __has_cpp_attribute
- #define __has_cpp_attribute(x) 0
-#endif
-#ifndef CYTHON_RESTRICT
- #if defined(__GNUC__)
- #define CYTHON_RESTRICT __restrict__
- #elif defined(_MSC_VER) && _MSC_VER >= 1400
- #define CYTHON_RESTRICT __restrict
- #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
- #define CYTHON_RESTRICT restrict
- #else
- #define CYTHON_RESTRICT
- #endif
-#endif
-#ifndef CYTHON_UNUSED
-# if defined(__GNUC__)
-# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
-# define CYTHON_UNUSED __attribute__ ((__unused__))
-# else
-# define CYTHON_UNUSED
-# endif
-# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
-# define CYTHON_UNUSED __attribute__ ((__unused__))
-# else
-# define CYTHON_UNUSED
-# endif
-#endif
-#ifndef CYTHON_MAYBE_UNUSED_VAR
-# if defined(__cplusplus)
- template<typename T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
-# else
-# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
-# endif
-#endif
-#ifndef CYTHON_NCP_UNUSED
-# if CYTHON_COMPILING_IN_CPYTHON
-# define CYTHON_NCP_UNUSED
-# else
-# define CYTHON_NCP_UNUSED CYTHON_UNUSED
-# endif
-#endif
-#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
-#ifdef _MSC_VER
- #ifndef _MSC_STDINT_H_
- #if _MSC_VER < 1300
- typedef unsigned char uint8_t;
- typedef unsigned int uint32_t;
- #else
- typedef unsigned __int8 uint8_t;
- typedef unsigned __int32 uint32_t;
- #endif
- #endif
-#else
- #include <stdint.h>
-#endif
-#ifndef CYTHON_FALLTHROUGH
- #if defined(__cplusplus) && __cplusplus >= 201103L
- #if __has_cpp_attribute(fallthrough)
- #define CYTHON_FALLTHROUGH [[fallthrough]]
- #elif __has_cpp_attribute(clang::fallthrough)
- #define CYTHON_FALLTHROUGH [[clang::fallthrough]]
- #elif __has_cpp_attribute(gnu::fallthrough)
- #define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
- #endif
- #endif
- #ifndef CYTHON_FALLTHROUGH
- #if __has_attribute(fallthrough)
- #define CYTHON_FALLTHROUGH __attribute__((fallthrough))
- #else
- #define CYTHON_FALLTHROUGH
- #endif
- #endif
- #if defined(__clang__ ) && defined(__apple_build_version__)
- #if __apple_build_version__ < 7000000
- #undef CYTHON_FALLTHROUGH
- #define CYTHON_FALLTHROUGH
- #endif
- #endif
-#endif
-
-#ifndef CYTHON_INLINE
- #if defined(__clang__)
- #define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
- #elif defined(__GNUC__)
- #define CYTHON_INLINE __inline__
- #elif defined(_MSC_VER)
- #define CYTHON_INLINE __inline
- #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
- #define CYTHON_INLINE inline
- #else
- #define CYTHON_INLINE
- #endif
-#endif
-
-#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
- #define Py_OptimizeFlag 0
-#endif
-#define __PYX_BUILD_PY_SSIZE_T "n"
-#define CYTHON_FORMAT_SSIZE_T "z"
-#if PY_MAJOR_VERSION < 3
- #define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
- #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
- PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
- #define __Pyx_DefaultClassType PyClass_Type
-#else
- #define __Pyx_BUILTIN_MODULE_NAME "builtins"
-#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2
- #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
- PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
-#else
- #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
- PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
-#endif
- #define __Pyx_DefaultClassType PyType_Type
-#endif
-#ifndef Py_TPFLAGS_CHECKTYPES
- #define Py_TPFLAGS_CHECKTYPES 0
-#endif
-#ifndef Py_TPFLAGS_HAVE_INDEX
- #define Py_TPFLAGS_HAVE_INDEX 0
-#endif
-#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
- #define Py_TPFLAGS_HAVE_NEWBUFFER 0
-#endif
-#ifndef Py_TPFLAGS_HAVE_FINALIZE
- #define Py_TPFLAGS_HAVE_FINALIZE 0
-#endif
-#ifndef METH_STACKLESS
- #define METH_STACKLESS 0
-#endif
-#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
- #ifndef METH_FASTCALL
- #define METH_FASTCALL 0x80
- #endif
- typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
- typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
- Py_ssize_t nargs, PyObject *kwnames);
-#else
- #define __Pyx_PyCFunctionFast _PyCFunctionFast
- #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
-#endif
-#if CYTHON_FAST_PYCCALL
-#define __Pyx_PyFastCFunction_Check(func)\
- ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | 
METH_STACKLESS))))) -#else -#define __Pyx_PyFastCFunction_Check(func) 0 -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) - #define PyObject_Malloc(s) PyMem_Malloc(s) - #define PyObject_Free(p) PyMem_Free(p) - #define PyObject_Realloc(p) PyMem_Realloc(p) -#endif -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 - #define PyMem_RawMalloc(n) PyMem_Malloc(n) - #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) - #define PyMem_RawFree(p) PyMem_Free(p) -#endif -#if CYTHON_COMPILING_IN_PYSTON - #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) - #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) -#else - #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) - #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) -#endif -#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 - #define __Pyx_PyThreadState_Current PyThreadState_GET() -#elif PY_VERSION_HEX >= 0x03060000 - #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() -#elif PY_VERSION_HEX >= 0x03000000 - #define __Pyx_PyThreadState_Current PyThreadState_GET() -#else - #define __Pyx_PyThreadState_Current _PyThreadState_Current -#endif -#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) -#include "pythread.h" -#define Py_tss_NEEDS_INIT 0 -typedef int Py_tss_t; -static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { - *key = PyThread_create_key(); - return 0; -} -static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { - Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); - *key = Py_tss_NEEDS_INIT; - return key; -} -static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { - PyObject_Free(key); -} -static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { - return *key != Py_tss_NEEDS_INIT; -} -static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { - PyThread_delete_key(*key); - *key = Py_tss_NEEDS_INIT; -} -static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { - return PyThread_set_key_value(*key, value); -} -static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { - return PyThread_get_key_value(*key); -} -#endif -#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) -#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) -#else -#define __Pyx_PyDict_NewPresized(n) PyDict_New() -#endif -#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION - #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) -#else - #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) - #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) -#endif -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS -#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) -#else -#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) -#endif -#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) - #define CYTHON_PEP393_ENABLED 1 - #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ - 0 : _PyUnicode_Ready((PyObject *)(op))) - #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) - #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) - #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) - #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) - #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) - #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) - #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) - #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE) - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) - #else - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) - #endif -#else - #define CYTHON_PEP393_ENABLED 0 - #define PyUnicode_1BYTE_KIND 1 - #define PyUnicode_2BYTE_KIND 2 - #define PyUnicode_4BYTE_KIND 4 - #define __Pyx_PyUnicode_READY(op) (0) - #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) - #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) - #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111) - #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) - #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) - #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) - #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) - #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) -#endif -#if CYTHON_COMPILING_IN_PYPY - #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) - #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) -#else - #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) - #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ - PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) - #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) - #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) -#endif -#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) - #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) -#endif -#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) -#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) -#else - #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) -#endif -#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) - #define PyObject_ASCII(o) PyObject_Repr(o) -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyBaseString_Type PyUnicode_Type - #define PyStringObject PyUnicodeObject - #define PyString_Type PyUnicode_Type - #define PyString_Check PyUnicode_Check - #define PyString_CheckExact PyUnicode_CheckExact -#ifndef PyObject_Unicode - #define PyObject_Unicode PyObject_Str -#endif -#endif -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) - #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) -#else - #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) - #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) -#endif -#ifndef PySet_CheckExact - #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) -#endif -#if PY_VERSION_HEX >= 0x030900A4 - #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) - #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) -#else - #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) - #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) -#endif -#if CYTHON_ASSUME_SAFE_MACROS - #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) -#else - #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyIntObject PyLongObject - #define PyInt_Type PyLong_Type - #define PyInt_Check(op) PyLong_Check(op) - #define PyInt_CheckExact(op) PyLong_CheckExact(op) - #define PyInt_FromString PyLong_FromString - #define PyInt_FromUnicode PyLong_FromUnicode - #define PyInt_FromLong PyLong_FromLong - #define PyInt_FromSize_t PyLong_FromSize_t - #define PyInt_FromSsize_t PyLong_FromSsize_t - #define PyInt_AsLong PyLong_AsLong - #define PyInt_AS_LONG PyLong_AS_LONG - #define PyInt_AsSsize_t PyLong_AsSsize_t - #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask - #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask - #define PyNumber_Int PyNumber_Long -#endif -#if PY_MAJOR_VERSION >= 3 - #define PyBoolObject PyLongObject -#endif -#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY - #ifndef PyUnicode_InternFromString - #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) - #endif -#endif -#if PY_VERSION_HEX < 0x030200A4 - typedef long Py_hash_t; - #define __Pyx_PyInt_FromHash_t PyInt_FromLong - #define __Pyx_PyInt_AsHash_t PyInt_AsLong -#else - #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t - #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t -#endif -#if PY_MAJOR_VERSION >= 3 - #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func))
-#else
- #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
-#endif
-#if CYTHON_USE_ASYNC_SLOTS
- #if PY_VERSION_HEX >= 0x030500B1
- #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
- #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
- #else
- #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
- #endif
-#else
- #define __Pyx_PyType_AsAsync(obj) NULL
-#endif
-#ifndef __Pyx_PyAsyncMethodsStruct
- typedef struct {
- unaryfunc am_await;
- unaryfunc am_aiter;
- unaryfunc am_anext;
- } __Pyx_PyAsyncMethodsStruct;
-#endif
-
-#if defined(WIN32) || defined(MS_WINDOWS)
- #define _USE_MATH_DEFINES
-#endif
-#include <math.h>
-#ifdef NAN
-#define __PYX_NAN() ((float) NAN)
-#else
-static CYTHON_INLINE float __PYX_NAN() {
- float value;
- memset(&value, 0xFF, sizeof(value));
- return value;
-}
-#endif
-#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
-#define __Pyx_truncl trunc
-#else
-#define __Pyx_truncl truncl
-#endif
-
-#define __PYX_MARK_ERR_POS(f_index, lineno) \
- { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; }
-#define __PYX_ERR(f_index, lineno, Ln_error) \
- { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }
-
-#ifndef __PYX_EXTERN_C
- #ifdef __cplusplus
- #define __PYX_EXTERN_C extern "C"
- #else
- #define __PYX_EXTERN_C extern
- #endif
-#endif
-
-#define __PYX_HAVE__monotonic_align__core
-#define __PYX_HAVE_API__monotonic_align__core
-/* Early includes */
-#include "pythread.h"
-#include <string.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include "pystate.h"
-#ifdef _OPENMP
-#include <omp.h>
-#endif /* _OPENMP */
-
-#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
-#define CYTHON_WITHOUT_ASSERTIONS
-#endif
-
-typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
- const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
-
-#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
-#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
-#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
-#define __PYX_DEFAULT_STRING_ENCODING ""
-#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
-#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
-#define __Pyx_uchar_cast(c) ((unsigned char)c)
-#define __Pyx_long_cast(x) ((long)x)
-#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
- (sizeof(type) < sizeof(Py_ssize_t)) ||\
- (sizeof(type) > sizeof(Py_ssize_t) &&\
- likely(v < (type)PY_SSIZE_T_MAX ||\
- v == (type)PY_SSIZE_T_MAX) &&\
- (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
- v == (type)PY_SSIZE_T_MIN))) ||\
- (sizeof(type) == sizeof(Py_ssize_t) &&\
- (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
- v == (type)PY_SSIZE_T_MAX))) )
-static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
- return (size_t) i < (size_t) limit;
-}
-#if defined (__cplusplus) && __cplusplus >= 201103L
- #include <cstdlib>
- #define __Pyx_sst_abs(value) std::abs(value)
-#elif SIZEOF_INT >= SIZEOF_SIZE_T
- #define __Pyx_sst_abs(value) abs(value)
-#elif SIZEOF_LONG >= SIZEOF_SIZE_T
- #define __Pyx_sst_abs(value) labs(value)
-#elif defined (_MSC_VER)
- #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
-#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
- #define __Pyx_sst_abs(value) llabs(value)
-#elif defined (__GNUC__)
- 
#define __Pyx_sst_abs(value) __builtin_llabs(value) -#else - #define __Pyx_sst_abs(value) ((value<0) ? -value : value) -#endif -static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); -static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); -#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) -#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) -#define __Pyx_PyBytes_FromString PyBytes_FromString -#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize -static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); -#if PY_MAJOR_VERSION < 3 - #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString - #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize -#else - #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString - #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize -#endif -#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) -#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) -#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) -#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) -#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) -#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) -#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) -static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { - const Py_UNICODE *u_end = u; - while (*u_end++) ; - return (size_t)(u_end - u - 1); -} -#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) -#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode -#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode -#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) -#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) -static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); -static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); -static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); -#define __Pyx_PySequence_Tuple(obj)\ - (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); -#if CYTHON_ASSUME_SAFE_MACROS -#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? 
PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) -#else -#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) -#endif -#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) -#if PY_MAJOR_VERSION >= 3 -#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) -#else -#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) -#endif -#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) -#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII -static int __Pyx_sys_getdefaultencoding_not_ascii; -static int __Pyx_init_sys_getdefaultencoding_params(void) { - PyObject* sys; - PyObject* default_encoding = NULL; - PyObject* ascii_chars_u = NULL; - PyObject* ascii_chars_b = NULL; - const char* default_encoding_c; - sys = PyImport_ImportModule("sys"); - if (!sys) goto bad; - default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); - Py_DECREF(sys); - if (!default_encoding) goto bad; - default_encoding_c = PyBytes_AsString(default_encoding); - if (!default_encoding_c) goto bad; - if (strcmp(default_encoding_c, "ascii") == 0) { - __Pyx_sys_getdefaultencoding_not_ascii = 0; - } else { - char ascii_chars[128]; - int c; - for (c = 0; c < 128; c++) { - ascii_chars[c] = c; - } - __Pyx_sys_getdefaultencoding_not_ascii = 1; - ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); - if (!ascii_chars_u) goto bad; - ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); - if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { - PyErr_Format( - PyExc_ValueError, - "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", - default_encoding_c); - goto bad; - } - Py_DECREF(ascii_chars_u); - Py_DECREF(ascii_chars_b); - } - Py_DECREF(default_encoding); - return 0; -bad: - Py_XDECREF(default_encoding); - Py_XDECREF(ascii_chars_u); - Py_XDECREF(ascii_chars_b); - return -1; -} -#endif -#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 -#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) -#else -#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) -#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT -static char* __PYX_DEFAULT_STRING_ENCODING; -static int __Pyx_init_sys_getdefaultencoding_params(void) { - PyObject* sys; - PyObject* default_encoding = NULL; - char* default_encoding_c; - sys = PyImport_ImportModule("sys"); - if (!sys) goto bad; - default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); - Py_DECREF(sys); - if (!default_encoding) goto bad; - default_encoding_c = PyBytes_AsString(default_encoding); - if (!default_encoding_c) goto bad; - __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); - if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; - strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); - Py_DECREF(default_encoding); - return 0; -bad: - Py_XDECREF(default_encoding); - return -1; -} -#endif -#endif - - -/* Test for GCC > 2.95 */ -#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) - #define likely(x) __builtin_expect(!!(x), 1) - #define unlikely(x) __builtin_expect(!!(x), 0) -#else /* !__GNUC__ or GCC < 2.95 */ - #define likely(x) (x) - #define unlikely(x) (x) -#endif /* __GNUC__ */ 
-static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
-
-static PyObject *__pyx_m = NULL;
-static PyObject *__pyx_d;
-static PyObject *__pyx_b;
-static PyObject *__pyx_cython_runtime = NULL;
-static PyObject *__pyx_empty_tuple;
-static PyObject *__pyx_empty_bytes;
-static PyObject *__pyx_empty_unicode;
-static int __pyx_lineno;
-static int __pyx_clineno = 0;
-static const char * __pyx_cfilenm= __FILE__;
-static const char *__pyx_filename;
-
-
-static const char *__pyx_f[] = {
- "core.pyx",
- "stringsource",
-};
-/* NoFastGil.proto */
-#define __Pyx_PyGILState_Ensure PyGILState_Ensure
-#define __Pyx_PyGILState_Release PyGILState_Release
-#define __Pyx_FastGIL_Remember()
-#define __Pyx_FastGIL_Forget()
-#define __Pyx_FastGilFuncInit()
-
-/* MemviewSliceStruct.proto */
-struct __pyx_memoryview_obj;
-typedef struct {
- struct __pyx_memoryview_obj *memview;
- char *data;
- Py_ssize_t shape[8];
- Py_ssize_t strides[8];
- Py_ssize_t suboffsets[8];
-} __Pyx_memviewslice;
-#define __Pyx_MemoryView_Len(m) (m.shape[0])
-
-/* Atomics.proto */
-#include <pythread.h>
-#ifndef CYTHON_ATOMICS
- #define CYTHON_ATOMICS 1
-#endif
-#define __pyx_atomic_int_type int
-#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\
- (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\
- !defined(__i386__)
- #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
- #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
- #ifdef __PYX_DEBUG_ATOMICS
- #warning "Using GNU atomics"
- #endif
-#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0
- #include <intrin.h>
- #undef __pyx_atomic_int_type
- #define __pyx_atomic_int_type LONG
- #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
- #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
- #ifdef __PYX_DEBUG_ATOMICS
- #pragma message ("Using MSVC atomics")
- #endif
-#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
- #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
- #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
- #ifdef __PYX_DEBUG_ATOMICS
- #warning "Using Intel atomics"
- #endif
-#else
- #undef CYTHON_ATOMICS
- #define CYTHON_ATOMICS 0
- #ifdef __PYX_DEBUG_ATOMICS
- #warning "Not using atomics"
- #endif
-#endif
-typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
-#if CYTHON_ATOMICS
- #define __pyx_add_acquisition_count(memview)\
- __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
- #define __pyx_sub_acquisition_count(memview)\
- __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
-#else
- #define __pyx_add_acquisition_count(memview)\
- __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
- #define __pyx_sub_acquisition_count(memview)\
- __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
-#endif
-
-/* ForceInitThreads.proto */
-#ifndef __PYX_FORCE_INIT_THREADS
- #define __PYX_FORCE_INIT_THREADS 0
-#endif
-
-/* BufferFormatStructs.proto */
-#define IS_UNSIGNED(type) (((type) -1) > 0)
-struct __Pyx_StructField_;
-#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
-typedef struct {
- const char* name;
- struct __Pyx_StructField_* fields;
- size_t size;
- size_t arraysize[8];
- int ndim;
- char typegroup;
- char is_unsigned;
- int flags;
-} __Pyx_TypeInfo;
-typedef struct __Pyx_StructField_ {
- __Pyx_TypeInfo* type;
- const char* name;
- size_t offset;
-}
__Pyx_StructField; -typedef struct { - __Pyx_StructField* field; - size_t parent_offset; -} __Pyx_BufFmt_StackElem; -typedef struct { - __Pyx_StructField root; - __Pyx_BufFmt_StackElem* head; - size_t fmt_offset; - size_t new_count, enc_count; - size_t struct_alignment; - int is_complex; - char enc_type; - char new_packmode; - char enc_packmode; - char is_valid_array; -} __Pyx_BufFmt_Context; - - -/*--- Type declarations ---*/ -struct __pyx_array_obj; -struct __pyx_MemviewEnum_obj; -struct __pyx_memoryview_obj; -struct __pyx_memoryviewslice_obj; -struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each; - -/* "monotonic_align/core.pyx":7 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< - * cdef int x - * cdef int y - */ -struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each { - int __pyx_n; - float max_neg_val; -}; - -/* "View.MemoryView":105 - * - * @cname("__pyx_array") - * cdef class array: # <<<<<<<<<<<<<< - * - * cdef: - */ -struct __pyx_array_obj { - PyObject_HEAD - struct __pyx_vtabstruct_array *__pyx_vtab; - char *data; - Py_ssize_t len; - char *format; - int ndim; - Py_ssize_t *_shape; - Py_ssize_t *_strides; - Py_ssize_t itemsize; - PyObject *mode; - PyObject *_format; - void (*callback_free_data)(void *); - int free_data; - int dtype_is_object; -}; - - -/* "View.MemoryView":279 - * - * @cname('__pyx_MemviewEnum') - * cdef class Enum(object): # <<<<<<<<<<<<<< - * cdef object name - * def __init__(self, name): - */ -struct __pyx_MemviewEnum_obj { - PyObject_HEAD - PyObject *name; -}; - - -/* "View.MemoryView":330 - * - * @cname('__pyx_memoryview') - * cdef class memoryview(object): # <<<<<<<<<<<<<< - * - * cdef object obj - */ -struct __pyx_memoryview_obj { - PyObject_HEAD - struct __pyx_vtabstruct_memoryview *__pyx_vtab; - PyObject *obj; - PyObject *_size; - PyObject *_array_interface; - PyThread_type_lock lock; - __pyx_atomic_int acquisition_count[2]; - __pyx_atomic_int *acquisition_count_aligned_p; - Py_buffer view; - int flags; - int dtype_is_object; - __Pyx_TypeInfo *typeinfo; -}; - - -/* "View.MemoryView":965 - * - * @cname('__pyx_memoryviewslice') - * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< - * "Internal class for passing memoryview slices to Python" - * - */ -struct __pyx_memoryviewslice_obj { - struct __pyx_memoryview_obj __pyx_base; - __Pyx_memviewslice from_slice; - PyObject *from_object; - PyObject *(*to_object_func)(char *); - int (*to_dtype_func)(char *, PyObject *); -}; - - - -/* "View.MemoryView":105 - * - * @cname("__pyx_array") - * cdef class array: # <<<<<<<<<<<<<< - * - * cdef: - */ - -struct __pyx_vtabstruct_array { - PyObject *(*get_memview)(struct __pyx_array_obj *); -}; -static struct __pyx_vtabstruct_array *__pyx_vtabptr_array; - - -/* "View.MemoryView":330 - * - * @cname('__pyx_memoryview') - * cdef class memoryview(object): # <<<<<<<<<<<<<< - * - * cdef object obj - */ - -struct __pyx_vtabstruct_memoryview { - char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); - PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); - PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); - PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); - PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); - PyObject 
*(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); - PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); -}; -static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; - - -/* "View.MemoryView":965 - * - * @cname('__pyx_memoryviewslice') - * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< - * "Internal class for passing memoryview slices to Python" - * - */ - -struct __pyx_vtabstruct__memoryviewslice { - struct __pyx_vtabstruct_memoryview __pyx_base; -}; -static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; - -/* --- Runtime support code (head) --- */ -/* Refnanny.proto */ -#ifndef CYTHON_REFNANNY - #define CYTHON_REFNANNY 0 -#endif -#if CYTHON_REFNANNY - typedef struct { - void (*INCREF)(void*, PyObject*, int); - void (*DECREF)(void*, PyObject*, int); - void (*GOTREF)(void*, PyObject*, int); - void (*GIVEREF)(void*, PyObject*, int); - void* (*SetupContext)(const char*, int, const char*); - void (*FinishContext)(void**); - } __Pyx_RefNannyAPIStruct; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; - static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); - #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; -#ifdef WITH_THREAD - #define __Pyx_RefNannySetupContext(name, acquire_gil)\ - if (acquire_gil) {\ - PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ - PyGILState_Release(__pyx_gilstate_save);\ - } else {\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ - } -#else - #define __Pyx_RefNannySetupContext(name, acquire_gil)\ - __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) -#endif - #define __Pyx_RefNannyFinishContext()\ - __Pyx_RefNanny->FinishContext(&__pyx_refnanny) - #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) - #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) - #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) - #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) - #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) -#else - #define __Pyx_RefNannyDeclarations - #define __Pyx_RefNannySetupContext(name, acquire_gil) - #define __Pyx_RefNannyFinishContext() - #define __Pyx_INCREF(r) Py_INCREF(r) - #define __Pyx_DECREF(r) Py_DECREF(r) - #define __Pyx_GOTREF(r) - #define __Pyx_GIVEREF(r) - #define __Pyx_XINCREF(r) Py_XINCREF(r) - #define __Pyx_XDECREF(r) Py_XDECREF(r) - #define __Pyx_XGOTREF(r) - #define __Pyx_XGIVEREF(r) -#endif -#define __Pyx_XDECREF_SET(r, v) do {\ - PyObject *tmp = (PyObject *) r;\ - r = v; __Pyx_XDECREF(tmp);\ - } while (0) -#define __Pyx_DECREF_SET(r, v) do {\ - PyObject *tmp = (PyObject *) r;\ - r = v; __Pyx_DECREF(tmp);\ - } while (0) -#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) -#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) - -/* PyObjectGetAttrStr.proto */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject* 
__Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); -#else -#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) -#endif - -/* GetBuiltinName.proto */ -static PyObject *__Pyx_GetBuiltinName(PyObject *name); - -/* MemviewSliceInit.proto */ -#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d -#define __Pyx_MEMVIEW_DIRECT 1 -#define __Pyx_MEMVIEW_PTR 2 -#define __Pyx_MEMVIEW_FULL 4 -#define __Pyx_MEMVIEW_CONTIG 8 -#define __Pyx_MEMVIEW_STRIDED 16 -#define __Pyx_MEMVIEW_FOLLOW 32 -#define __Pyx_IS_C_CONTIG 1 -#define __Pyx_IS_F_CONTIG 2 -static int __Pyx_init_memviewslice( - struct __pyx_memoryview_obj *memview, - int ndim, - __Pyx_memviewslice *memviewslice, - int memview_is_new_reference); -static CYTHON_INLINE int __pyx_add_acquisition_count_locked( - __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); -static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( - __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); -#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) -#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) -#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) -#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) -static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); -static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); - -/* RaiseArgTupleInvalid.proto */ -static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, - Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); - -/* RaiseDoubleKeywords.proto */ -static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); - -/* ParseKeywords.proto */ -static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ - PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ - const char* function_name); - -/* None.proto */ -static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); - -/* ArgTypeTest.proto */ -#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ - ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 
1 :\ - __Pyx__ArgTypeTest(obj, type, name, exact)) -static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); - -/* PyObjectCall.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); -#else -#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) -#endif - -/* PyThreadStateGet.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; -#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; -#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type -#else -#define __Pyx_PyThreadState_declare -#define __Pyx_PyThreadState_assign -#define __Pyx_PyErr_Occurred() PyErr_Occurred() -#endif - -/* PyErrFetchRestore.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) -#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) -#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) -#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) -#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); -static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#if CYTHON_COMPILING_IN_CPYTHON -#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) -#else -#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) -#endif -#else -#define __Pyx_PyErr_Clear() PyErr_Clear() -#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) -#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) -#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) -#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) -#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) -#endif - -/* RaiseException.proto */ -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); - -/* PyCFunctionFastCall.proto */ -#if CYTHON_FAST_PYCCALL -static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); -#else -#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) -#endif - -/* PyFunctionFastCall.proto */ -#if CYTHON_FAST_PYCALL -#define __Pyx_PyFunction_FastCall(func, args, nargs)\ - __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) -#if 1 || PY_VERSION_HEX < 0x030600B1 -static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); -#else -#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) -#endif -#define __Pyx_BUILD_ASSERT_EXPR(cond)\ - (sizeof(char [1 - 2*!(cond)]) - 1) -#ifndef Py_MEMBER_SIZE -#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) -#endif - static size_t __pyx_pyframe_localsplus_offset = 0; - #include "frameobject.h" - #define __Pxy_PyFrame_Initialize_Offsets()\ - 
((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
- (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
- #define __Pyx_PyFrame_GetLocalsplus(frame)\
- (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
-#endif
-
-/* PyObjectCall2Args.proto */
-static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2);
-
-/* PyObjectCallMethO.proto */
-#if CYTHON_COMPILING_IN_CPYTHON
-static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
-#endif
-
-/* PyObjectCallOneArg.proto */
-static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
-
-/* IncludeStringH.proto */
-#include <string.h>
-
-/* BytesEquals.proto */
-static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);
-
-/* UnicodeEquals.proto */
-static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);
-
-/* StrEquals.proto */
-#if PY_MAJOR_VERSION >= 3
-#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
-#else
-#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
-#endif
-
-/* None.proto */
-static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t);
-
-/* UnaryNegOverflows.proto */
-#define UNARY_NEG_WOULD_OVERFLOW(x)\
- (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
-
-static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
-static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/
-/* GetAttr.proto */
-static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
-
-/* GetItemInt.proto */
-#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
- (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
- __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
- (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ - __Pyx_GetItemInt_Generic(o, to_py_func(i)))) -#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ - (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, - int wraparound, int boundscheck); -#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ - (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ - __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ - (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, - int wraparound, int boundscheck); -static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, - int is_list, int wraparound, int boundscheck); - -/* ObjectGetItem.proto */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key); -#else -#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) -#endif - -/* decode_c_string_utf16.proto */ -static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) { - int byteorder = 0; - return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); -} -static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) { - int byteorder = -1; - return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); -} -static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) { - int byteorder = 1; - return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); -} - -/* decode_c_string.proto */ -static CYTHON_INLINE PyObject* __Pyx_decode_c_string( - const char* cstring, Py_ssize_t start, Py_ssize_t stop, - const char* encoding, const char* errors, - PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); - -/* PyErrExceptionMatches.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) -static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); -#else -#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) -#endif - -/* GetAttr3.proto */ -static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); - -/* PyDictVersioning.proto */ -#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS -#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) -#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) -#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ - (version_var) = __PYX_GET_DICT_VERSION(dict);\ - (cache_var) = (value); -#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ - static PY_UINT64_T __pyx_dict_version = 0;\ - static PyObject *__pyx_dict_cached_value = NULL;\ - if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ - (VAR) = __pyx_dict_cached_value;\ - } else {\ - (VAR) = __pyx_dict_cached_value = (LOOKUP);\ - __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ - }\ -} -static 
CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); -static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); -static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); -#else -#define __PYX_GET_DICT_VERSION(dict) (0) -#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) -#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); -#endif - -/* GetModuleGlobalName.proto */ -#if CYTHON_USE_DICT_VERSIONS -#define __Pyx_GetModuleGlobalName(var, name) {\ - static PY_UINT64_T __pyx_dict_version = 0;\ - static PyObject *__pyx_dict_cached_value = NULL;\ - (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ - (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ - __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ -} -#define __Pyx_GetModuleGlobalNameUncached(var, name) {\ - PY_UINT64_T __pyx_dict_version;\ - PyObject *__pyx_dict_cached_value;\ - (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ -} -static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); -#else -#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) -#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) -static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); -#endif - -/* RaiseTooManyValuesToUnpack.proto */ -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); - -/* RaiseNeedMoreValuesToUnpack.proto */ -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); - -/* RaiseNoneIterError.proto */ -static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); - -/* ExtTypeTest.proto */ -static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); - -/* GetTopmostException.proto */ -#if CYTHON_USE_EXC_INFO_STACK -static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); -#endif - -/* SaveResetException.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); -#else -#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) -#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) -#endif - -/* GetException.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) -static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#else -static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); -#endif - -/* SwapException.proto */ -#if CYTHON_FAST_THREAD_STATE -#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) -static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); -#else -static CYTHON_INLINE void 
__Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); -#endif - -/* Import.proto */ -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); - -/* FastTypeChecks.proto */ -#if CYTHON_COMPILING_IN_CPYTHON -#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) -static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); -#else -#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) -#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) -#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) -#endif -#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) - -static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ -/* ListCompAppend.proto */ -#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS -static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { - PyListObject* L = (PyListObject*) list; - Py_ssize_t len = Py_SIZE(list); - if (likely(L->allocated > len)) { - Py_INCREF(x); - PyList_SET_ITEM(list, len, x); - __Pyx_SET_SIZE(list, len + 1); - return 0; - } - return PyList_Append(list, x); -} -#else -#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) -#endif - -/* PyIntBinop.proto */ -#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); -#else -#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\ - (inplace ? 
PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) -#endif - -/* ListExtend.proto */ -static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { -#if CYTHON_COMPILING_IN_CPYTHON - PyObject* none = _PyList_Extend((PyListObject*)L, v); - if (unlikely(!none)) - return -1; - Py_DECREF(none); - return 0; -#else - return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); -#endif -} - -/* ListAppend.proto */ -#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS -static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { - PyListObject* L = (PyListObject*) list; - Py_ssize_t len = Py_SIZE(list); - if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { - Py_INCREF(x); - PyList_SET_ITEM(list, len, x); - __Pyx_SET_SIZE(list, len + 1); - return 0; - } - return PyList_Append(list, x); -} -#else -#define __Pyx_PyList_Append(L,x) PyList_Append(L,x) -#endif - -/* None.proto */ -static CYTHON_INLINE long __Pyx_div_long(long, long); - -/* ImportFrom.proto */ -static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); - -/* HasAttr.proto */ -static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); - -/* PyObject_GenericGetAttrNoDict.proto */ -#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 -static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); -#else -#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr -#endif - -/* PyObject_GenericGetAttr.proto */ -#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 -static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); -#else -#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr -#endif - -/* SetVTable.proto */ -static int __Pyx_SetVtable(PyObject *dict, void *vtable); - -/* PyObjectGetAttrStrNoError.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name); - -/* SetupReduce.proto */ -static int __Pyx_setup_reduce(PyObject* type_obj); - -/* CLineInTraceback.proto */ -#ifdef CYTHON_CLINE_IN_TRACEBACK -#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0) -#else -static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); -#endif - -/* CodeObjectCache.proto */ -typedef struct { - PyCodeObject* code_object; - int code_line; -} __Pyx_CodeObjectCacheEntry; -struct __Pyx_CodeObjectCache { - int count; - int max_count; - __Pyx_CodeObjectCacheEntry* entries; -}; -static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; -static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); -static PyCodeObject *__pyx_find_code_object(int code_line); -static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); - -/* AddTraceback.proto */ -static void __Pyx_AddTraceback(const char *funcname, int c_line, - int py_line, const char *filename); - -#if PY_MAJOR_VERSION < 3 - static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); - static void __Pyx_ReleaseBuffer(Py_buffer *view); -#else - #define __Pyx_GetBuffer PyObject_GetBuffer - #define __Pyx_ReleaseBuffer PyBuffer_Release -#endif - - -/* BufferStructDeclare.proto */ -typedef struct { - Py_ssize_t shape, strides, suboffsets; -} __Pyx_Buf_DimInfo; -typedef struct { - size_t refcount; - Py_buffer pybuffer; -} __Pyx_Buffer; -typedef struct { - __Pyx_Buffer *rcbuffer; - char *data; - __Pyx_Buf_DimInfo diminfo[8]; -} __Pyx_LocalBuf_ND; - -/* MemviewSliceIsContig.proto */ -static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); - -/* OverlappingSlices.proto */ -static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, - __Pyx_memviewslice *slice2, - int ndim, size_t itemsize); - -/* Capsule.proto */ -static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); - -/* IsLittleEndian.proto */ -static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); - -/* BufferFormatCheck.proto */ -static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); -static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, - __Pyx_BufFmt_StackElem* stack, - __Pyx_TypeInfo* type); - -/* TypeInfoCompare.proto */ -static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); - -/* MemviewSliceValidateAndInit.proto */ -static int __Pyx_ValidateAndInit_memviewslice( - int *axes_specs, - int c_or_f_flag, - int buf_flags, - int ndim, - __Pyx_TypeInfo *dtype, - __Pyx_BufFmt_StackElem stack[], - __Pyx_memviewslice *memviewslice, - PyObject *original_obj); - -/* ObjectToMemviewSlice.proto */ -static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(PyObject *, int writable_flag); - -/* ObjectToMemviewSlice.proto */ -static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *, int writable_flag); - -/* ObjectToMemviewSlice.proto */ -static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *, int writable_flag); - -/* CIntToPy.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); - -/* CIntToPy.proto */ -static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); - -/* MemviewSliceCopyTemplate.proto */ -static __Pyx_memviewslice -__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, - const char *mode, int ndim, - size_t sizeof_dtype, int contig_flag, - int dtype_is_object); - -/* CIntFromPy.proto */ -static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); - -/* CIntFromPy.proto */ -static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); - -/* CIntFromPy.proto */ -static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject 
*); - -/* CheckBinaryVersion.proto */ -static int __Pyx_check_binary_version(void); - -/* InitStrings.proto */ -static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); - -static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ -static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ -static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ -static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ -static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ -static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ -static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ -static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ -static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ -static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ - -/* Module declarations from 'cython.view' */ - -/* Module declarations from 'cython' */ - -/* Module declarations from 'monotonic_align.core' */ -static PyTypeObject *__pyx_array_type = 0; -static PyTypeObject *__pyx_MemviewEnum_type = 0; -static PyTypeObject *__pyx_memoryview_type = 0; -static PyTypeObject *__pyx_memoryviewslice_type = 0; -static PyObject *generic = 0; -static PyObject *strided = 0; -static PyObject *indirect = 0; -static PyObject *contiguous = 0; -static PyObject *indirect_contiguous = 0; -static int __pyx_memoryview_thread_locks_used; -static PyThread_type_lock __pyx_memoryview_thread_locks[8]; -static void __pyx_f_15monotonic_align_4core_maximum_path_each(__Pyx_memviewslice, __Pyx_memviewslice, int, int, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each *__pyx_optional_args); /*proto*/ -static void __pyx_f_15monotonic_align_4core_maximum_path_c(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch); /*proto*/ -static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ -static void *__pyx_align_pointer(void *, size_t); /*proto*/ -static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ -static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ -static PyObject *_unellipsify(PyObject *, int); /*proto*/ -static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ -static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ -static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ -static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ -static int __pyx_memslice_transpose(__Pyx_memviewslice *); 
/*proto*/ -static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ -static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ -static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ -static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ -static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ -static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ -static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ -static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ -static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ -static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ -static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ -static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ -static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ -static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ -static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ -static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ -static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ -static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ -static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ -static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ -static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ -static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ -static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/ -static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, IS_UNSIGNED(int) ? 
'U' : 'I', IS_UNSIGNED(int), 0 }; -static __Pyx_TypeInfo __Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 }; -#define __Pyx_MODULE_NAME "monotonic_align.core" -extern int __pyx_module_is_main_monotonic_align__core; -int __pyx_module_is_main_monotonic_align__core = 0; - -/* Implementation of 'monotonic_align.core' */ -static PyObject *__pyx_builtin_range; -static PyObject *__pyx_builtin_ValueError; -static PyObject *__pyx_builtin_MemoryError; -static PyObject *__pyx_builtin_enumerate; -static PyObject *__pyx_builtin_TypeError; -static PyObject *__pyx_builtin_Ellipsis; -static PyObject *__pyx_builtin_id; -static PyObject *__pyx_builtin_IndexError; -static const char __pyx_k_O[] = "O"; -static const char __pyx_k_c[] = "c"; -static const char __pyx_k_id[] = "id"; -static const char __pyx_k_new[] = "__new__"; -static const char __pyx_k_obj[] = "obj"; -static const char __pyx_k_base[] = "base"; -static const char __pyx_k_dict[] = "__dict__"; -static const char __pyx_k_main[] = "__main__"; -static const char __pyx_k_mode[] = "mode"; -static const char __pyx_k_name[] = "name"; -static const char __pyx_k_ndim[] = "ndim"; -static const char __pyx_k_pack[] = "pack"; -static const char __pyx_k_size[] = "size"; -static const char __pyx_k_step[] = "step"; -static const char __pyx_k_stop[] = "stop"; -static const char __pyx_k_t_xs[] = "t_xs"; -static const char __pyx_k_t_ys[] = "t_ys"; -static const char __pyx_k_test[] = "__test__"; -static const char __pyx_k_ASCII[] = "ASCII"; -static const char __pyx_k_class[] = "__class__"; -static const char __pyx_k_error[] = "error"; -static const char __pyx_k_flags[] = "flags"; -static const char __pyx_k_paths[] = "paths"; -static const char __pyx_k_range[] = "range"; -static const char __pyx_k_shape[] = "shape"; -static const char __pyx_k_start[] = "start"; -static const char __pyx_k_encode[] = "encode"; -static const char __pyx_k_format[] = "format"; -static const char __pyx_k_import[] = "__import__"; -static const char __pyx_k_name_2[] = "__name__"; -static const char __pyx_k_pickle[] = "pickle"; -static const char __pyx_k_reduce[] = "__reduce__"; -static const char __pyx_k_struct[] = "struct"; -static const char __pyx_k_unpack[] = "unpack"; -static const char __pyx_k_update[] = "update"; -static const char __pyx_k_values[] = "values"; -static const char __pyx_k_fortran[] = "fortran"; -static const char __pyx_k_memview[] = "memview"; -static const char __pyx_k_Ellipsis[] = "Ellipsis"; -static const char __pyx_k_getstate[] = "__getstate__"; -static const char __pyx_k_itemsize[] = "itemsize"; -static const char __pyx_k_pyx_type[] = "__pyx_type"; -static const char __pyx_k_setstate[] = "__setstate__"; -static const char __pyx_k_TypeError[] = "TypeError"; -static const char __pyx_k_enumerate[] = "enumerate"; -static const char __pyx_k_pyx_state[] = "__pyx_state"; -static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; -static const char __pyx_k_IndexError[] = "IndexError"; -static const char __pyx_k_ValueError[] = "ValueError"; -static const char __pyx_k_pyx_result[] = "__pyx_result"; -static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; -static const char __pyx_k_MemoryError[] = "MemoryError"; -static const char __pyx_k_PickleError[] = "PickleError"; -static const char __pyx_k_pyx_checksum[] = "__pyx_checksum"; -static const char __pyx_k_stringsource[] = "stringsource"; -static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; -static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; -static const char 
__pyx_k_View_MemoryView[] = "View.MemoryView";
-static const char __pyx_k_allocate_buffer[] = "allocate_buffer";
-static const char __pyx_k_dtype_is_object[] = "dtype_is_object";
-static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError";
-static const char __pyx_k_setstate_cython[] = "__setstate_cython__";
-static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum";
-static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
-static const char __pyx_k_strided_and_direct[] = "<strided and direct>";
-static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>";
-static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>";
-static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>";
-static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>";
-static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>";
-static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'";
-static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d.";
-static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array";
-static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data.";
-static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>";
-static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides";
-static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory.";
-static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview";
-static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview";
-static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array";
-static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))";
-static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported";
-static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s";
-static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)";
-static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object";
-static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)";
-static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__";
-static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides.";
-static PyObject *__pyx_n_s_ASCII;
-static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri;
-static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is;
-static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor;
-static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi;
-static PyObject *__pyx_kp_s_Cannot_index_with_type_s;
-static PyObject *__pyx_n_s_Ellipsis;
-static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr;
-static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0;
-static PyObject *__pyx_n_s_IndexError;
-static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte;
-static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr;
-static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d;
-static PyObject *__pyx_n_s_MemoryError;
-static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x;
-static PyObject 
*__pyx_kp_s_MemoryView_of_r_object; -static PyObject *__pyx_n_b_O; -static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; -static PyObject *__pyx_n_s_PickleError; -static PyObject *__pyx_n_s_TypeError; -static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; -static PyObject *__pyx_n_s_ValueError; -static PyObject *__pyx_n_s_View_MemoryView; -static PyObject *__pyx_n_s_allocate_buffer; -static PyObject *__pyx_n_s_base; -static PyObject *__pyx_n_s_c; -static PyObject *__pyx_n_u_c; -static PyObject *__pyx_n_s_class; -static PyObject *__pyx_n_s_cline_in_traceback; -static PyObject *__pyx_kp_s_contiguous_and_direct; -static PyObject *__pyx_kp_s_contiguous_and_indirect; -static PyObject *__pyx_n_s_dict; -static PyObject *__pyx_n_s_dtype_is_object; -static PyObject *__pyx_n_s_encode; -static PyObject *__pyx_n_s_enumerate; -static PyObject *__pyx_n_s_error; -static PyObject *__pyx_n_s_flags; -static PyObject *__pyx_n_s_format; -static PyObject *__pyx_n_s_fortran; -static PyObject *__pyx_n_u_fortran; -static PyObject *__pyx_n_s_getstate; -static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; -static PyObject *__pyx_n_s_id; -static PyObject *__pyx_n_s_import; -static PyObject *__pyx_n_s_itemsize; -static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; -static PyObject *__pyx_n_s_main; -static PyObject *__pyx_n_s_memview; -static PyObject *__pyx_n_s_mode; -static PyObject *__pyx_n_s_name; -static PyObject *__pyx_n_s_name_2; -static PyObject *__pyx_n_s_ndim; -static PyObject *__pyx_n_s_new; -static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; -static PyObject *__pyx_n_s_obj; -static PyObject *__pyx_n_s_pack; -static PyObject *__pyx_n_s_paths; -static PyObject *__pyx_n_s_pickle; -static PyObject *__pyx_n_s_pyx_PickleError; -static PyObject *__pyx_n_s_pyx_checksum; -static PyObject *__pyx_n_s_pyx_getbuffer; -static PyObject *__pyx_n_s_pyx_result; -static PyObject *__pyx_n_s_pyx_state; -static PyObject *__pyx_n_s_pyx_type; -static PyObject *__pyx_n_s_pyx_unpickle_Enum; -static PyObject *__pyx_n_s_pyx_vtable; -static PyObject *__pyx_n_s_range; -static PyObject *__pyx_n_s_reduce; -static PyObject *__pyx_n_s_reduce_cython; -static PyObject *__pyx_n_s_reduce_ex; -static PyObject *__pyx_n_s_setstate; -static PyObject *__pyx_n_s_setstate_cython; -static PyObject *__pyx_n_s_shape; -static PyObject *__pyx_n_s_size; -static PyObject *__pyx_n_s_start; -static PyObject *__pyx_n_s_step; -static PyObject *__pyx_n_s_stop; -static PyObject *__pyx_kp_s_strided_and_direct; -static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; -static PyObject *__pyx_kp_s_strided_and_indirect; -static PyObject *__pyx_kp_s_stringsource; -static PyObject *__pyx_n_s_struct; -static PyObject *__pyx_n_s_t_xs; -static PyObject *__pyx_n_s_t_ys; -static PyObject *__pyx_n_s_test; -static PyObject *__pyx_kp_s_unable_to_allocate_array_data; -static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; -static PyObject *__pyx_n_s_unpack; -static PyObject *__pyx_n_s_update; -static PyObject *__pyx_n_s_values; -static PyObject *__pyx_pf_15monotonic_align_4core_maximum_path_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs); /* proto */ -static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ 
-static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ -static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ -static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ -static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ -static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ -static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ -static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ -static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ -static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ -static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ -static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ -static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject 
*__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ -static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ -static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ -static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ -static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ -static PyObject *__pyx_int_0; -static PyObject *__pyx_int_1; -static PyObject *__pyx_int_184977713; -static PyObject *__pyx_int_neg_1; -static float __pyx_k_; -static PyObject *__pyx_tuple__2; -static PyObject *__pyx_tuple__3; -static PyObject *__pyx_tuple__4; -static PyObject *__pyx_tuple__5; -static PyObject *__pyx_tuple__6; -static PyObject *__pyx_tuple__7; -static PyObject *__pyx_tuple__8; -static PyObject *__pyx_tuple__9; -static PyObject *__pyx_slice__16; -static 
PyObject *__pyx_tuple__10; -static PyObject *__pyx_tuple__11; -static PyObject *__pyx_tuple__12; -static PyObject *__pyx_tuple__13; -static PyObject *__pyx_tuple__14; -static PyObject *__pyx_tuple__15; -static PyObject *__pyx_tuple__17; -static PyObject *__pyx_tuple__18; -static PyObject *__pyx_tuple__19; -static PyObject *__pyx_tuple__20; -static PyObject *__pyx_tuple__21; -static PyObject *__pyx_tuple__22; -static PyObject *__pyx_tuple__23; -static PyObject *__pyx_tuple__24; -static PyObject *__pyx_tuple__25; -static PyObject *__pyx_codeobj__26; -/* Late includes */ - -/* "monotonic_align/core.pyx":7 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< - * cdef int x - * cdef int y - */ - -static void __pyx_f_15monotonic_align_4core_maximum_path_each(__Pyx_memviewslice __pyx_v_path, __Pyx_memviewslice __pyx_v_value, int __pyx_v_t_y, int __pyx_v_t_x, struct __pyx_opt_args_15monotonic_align_4core_maximum_path_each *__pyx_optional_args) { - float __pyx_v_max_neg_val = __pyx_k_; - int __pyx_v_x; - int __pyx_v_y; - float __pyx_v_v_prev; - float __pyx_v_v_cur; - int __pyx_v_index; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - long __pyx_t_4; - int __pyx_t_5; - long __pyx_t_6; - long __pyx_t_7; - int __pyx_t_8; - Py_ssize_t __pyx_t_9; - Py_ssize_t __pyx_t_10; - float __pyx_t_11; - float __pyx_t_12; - float __pyx_t_13; - int __pyx_t_14; - Py_ssize_t __pyx_t_15; - Py_ssize_t __pyx_t_16; - if (__pyx_optional_args) { - if (__pyx_optional_args->__pyx_n > 0) { - __pyx_v_max_neg_val = __pyx_optional_args->max_neg_val; - } - } - - /* "monotonic_align/core.pyx":13 - * cdef float v_cur - * cdef float tmp - * cdef int index = t_x - 1 # <<<<<<<<<<<<<< - * - * for y in range(t_y): - */ - __pyx_v_index = (__pyx_v_t_x - 1); - - /* "monotonic_align/core.pyx":15 - * cdef int index = t_x - 1 - * - * for y in range(t_y): # <<<<<<<<<<<<<< - * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - * if x == y: - */ - __pyx_t_1 = __pyx_v_t_y; - __pyx_t_2 = __pyx_t_1; - for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { - __pyx_v_y = __pyx_t_3; - - /* "monotonic_align/core.pyx":16 - * - * for y in range(t_y): - * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): # <<<<<<<<<<<<<< - * if x == y: - * v_cur = max_neg_val - */ - __pyx_t_4 = (__pyx_v_y + 1); - __pyx_t_5 = __pyx_v_t_x; - if (((__pyx_t_4 < __pyx_t_5) != 0)) { - __pyx_t_6 = __pyx_t_4; - } else { - __pyx_t_6 = __pyx_t_5; - } - __pyx_t_4 = __pyx_t_6; - __pyx_t_5 = ((__pyx_v_t_x + __pyx_v_y) - __pyx_v_t_y); - __pyx_t_6 = 0; - if (((__pyx_t_5 > __pyx_t_6) != 0)) { - __pyx_t_7 = __pyx_t_5; - } else { - __pyx_t_7 = __pyx_t_6; - } - __pyx_t_6 = __pyx_t_4; - for (__pyx_t_5 = __pyx_t_7; __pyx_t_5 < __pyx_t_6; __pyx_t_5+=1) { - __pyx_v_x = __pyx_t_5; - - /* "monotonic_align/core.pyx":17 - * for y in range(t_y): - * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - * if x == y: # <<<<<<<<<<<<<< - * v_cur = max_neg_val - * else: - */ - __pyx_t_8 = ((__pyx_v_x == __pyx_v_y) != 0); - if (__pyx_t_8) { - - /* "monotonic_align/core.pyx":18 - * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - * if x == y: - * v_cur = max_neg_val # <<<<<<<<<<<<<< - * else: - * v_cur = value[y-1, x] - */ - __pyx_v_v_cur = __pyx_v_max_neg_val; - - /* "monotonic_align/core.pyx":17 - * for y in range(t_y): - * for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - * if x == y: # <<<<<<<<<<<<<< - * v_cur = 
max_neg_val - * else: - */ - goto __pyx_L7; - } - - /* "monotonic_align/core.pyx":20 - * v_cur = max_neg_val - * else: - * v_cur = value[y-1, x] # <<<<<<<<<<<<<< - * if x == 0: - * if y == 0: - */ - /*else*/ { - __pyx_t_9 = (__pyx_v_y - 1); - __pyx_t_10 = __pyx_v_x; - __pyx_v_v_cur = (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) ))); - } - __pyx_L7:; - - /* "monotonic_align/core.pyx":21 - * else: - * v_cur = value[y-1, x] - * if x == 0: # <<<<<<<<<<<<<< - * if y == 0: - * v_prev = 0. - */ - __pyx_t_8 = ((__pyx_v_x == 0) != 0); - if (__pyx_t_8) { - - /* "monotonic_align/core.pyx":22 - * v_cur = value[y-1, x] - * if x == 0: - * if y == 0: # <<<<<<<<<<<<<< - * v_prev = 0. - * else: - */ - __pyx_t_8 = ((__pyx_v_y == 0) != 0); - if (__pyx_t_8) { - - /* "monotonic_align/core.pyx":23 - * if x == 0: - * if y == 0: - * v_prev = 0. # <<<<<<<<<<<<<< - * else: - * v_prev = max_neg_val - */ - __pyx_v_v_prev = 0.; - - /* "monotonic_align/core.pyx":22 - * v_cur = value[y-1, x] - * if x == 0: - * if y == 0: # <<<<<<<<<<<<<< - * v_prev = 0. - * else: - */ - goto __pyx_L9; - } - - /* "monotonic_align/core.pyx":25 - * v_prev = 0. - * else: - * v_prev = max_neg_val # <<<<<<<<<<<<<< - * else: - * v_prev = value[y-1, x-1] - */ - /*else*/ { - __pyx_v_v_prev = __pyx_v_max_neg_val; - } - __pyx_L9:; - - /* "monotonic_align/core.pyx":21 - * else: - * v_cur = value[y-1, x] - * if x == 0: # <<<<<<<<<<<<<< - * if y == 0: - * v_prev = 0. - */ - goto __pyx_L8; - } - - /* "monotonic_align/core.pyx":27 - * v_prev = max_neg_val - * else: - * v_prev = value[y-1, x-1] # <<<<<<<<<<<<<< - * value[y, x] += max(v_prev, v_cur) - * - */ - /*else*/ { - __pyx_t_10 = (__pyx_v_y - 1); - __pyx_t_9 = (__pyx_v_x - 1); - __pyx_v_v_prev = (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_10 * __pyx_v_value.strides[0]) )) + __pyx_t_9)) ))); - } - __pyx_L8:; - - /* "monotonic_align/core.pyx":28 - * else: - * v_prev = value[y-1, x-1] - * value[y, x] += max(v_prev, v_cur) # <<<<<<<<<<<<<< - * - * for y in range(t_y - 1, -1, -1): - */ - __pyx_t_11 = __pyx_v_v_cur; - __pyx_t_12 = __pyx_v_v_prev; - if (((__pyx_t_11 > __pyx_t_12) != 0)) { - __pyx_t_13 = __pyx_t_11; - } else { - __pyx_t_13 = __pyx_t_12; - } - __pyx_t_9 = __pyx_v_y; - __pyx_t_10 = __pyx_v_x; - *((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) )) += __pyx_t_13; - } - } - - /* "monotonic_align/core.pyx":30 - * value[y, x] += max(v_prev, v_cur) - * - * for y in range(t_y - 1, -1, -1): # <<<<<<<<<<<<<< - * path[y, index] = 1 - * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): - */ - for (__pyx_t_1 = (__pyx_v_t_y - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { - __pyx_v_y = __pyx_t_1; - - /* "monotonic_align/core.pyx":31 - * - * for y in range(t_y - 1, -1, -1): - * path[y, index] = 1 # <<<<<<<<<<<<<< - * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): - * index = index - 1 - */ - __pyx_t_10 = __pyx_v_y; - __pyx_t_9 = __pyx_v_index; - *((int *) ( /* dim=1 */ ((char *) (((int *) ( /* dim=0 */ (__pyx_v_path.data + __pyx_t_10 * __pyx_v_path.strides[0]) )) + __pyx_t_9)) )) = 1; - - /* "monotonic_align/core.pyx":32 - * for y in range(t_y - 1, -1, -1): - * path[y, index] = 1 - * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): # <<<<<<<<<<<<<< - * index = index - 1 - * - */ - __pyx_t_14 = 
((__pyx_v_index != 0) != 0); - if (__pyx_t_14) { - } else { - __pyx_t_8 = __pyx_t_14; - goto __pyx_L13_bool_binop_done; - } - __pyx_t_14 = ((__pyx_v_index == __pyx_v_y) != 0); - if (!__pyx_t_14) { - } else { - __pyx_t_8 = __pyx_t_14; - goto __pyx_L13_bool_binop_done; - } - __pyx_t_9 = (__pyx_v_y - 1); - __pyx_t_10 = __pyx_v_index; - __pyx_t_15 = (__pyx_v_y - 1); - __pyx_t_16 = (__pyx_v_index - 1); - __pyx_t_14 = (((*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_9 * __pyx_v_value.strides[0]) )) + __pyx_t_10)) ))) < (*((float *) ( /* dim=1 */ ((char *) (((float *) ( /* dim=0 */ (__pyx_v_value.data + __pyx_t_15 * __pyx_v_value.strides[0]) )) + __pyx_t_16)) )))) != 0); - __pyx_t_8 = __pyx_t_14; - __pyx_L13_bool_binop_done:; - if (__pyx_t_8) { - - /* "monotonic_align/core.pyx":33 - * path[y, index] = 1 - * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): - * index = index - 1 # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_index = (__pyx_v_index - 1); - - /* "monotonic_align/core.pyx":32 - * for y in range(t_y - 1, -1, -1): - * path[y, index] = 1 - * if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): # <<<<<<<<<<<<<< - * index = index - 1 - * - */ - } - } - - /* "monotonic_align/core.pyx":7 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< - * cdef int x - * cdef int y - */ - - /* function exit code */ -} - -/* "monotonic_align/core.pyx":38 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: # <<<<<<<<<<<<<< - * cdef int b = paths.shape[0] - * cdef int i - */ - -static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static void __pyx_f_15monotonic_align_4core_maximum_path_c(__Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs, CYTHON_UNUSED int __pyx_skip_dispatch) { - CYTHON_UNUSED int __pyx_v_b; - int __pyx_v_i; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - __Pyx_memviewslice __pyx_t_4 = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } }; - Py_ssize_t __pyx_t_6; - Py_ssize_t __pyx_t_7; - - /* "monotonic_align/core.pyx":39 - * @cython.wraparound(False) - * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: - * cdef int b = paths.shape[0] # <<<<<<<<<<<<<< - * cdef int i - * for i in prange(b, nogil=True): - */ - __pyx_v_b = (__pyx_v_paths.shape[0]); - - /* "monotonic_align/core.pyx":41 - * cdef int b = paths.shape[0] - * cdef int i - * for i in prange(b, nogil=True): # <<<<<<<<<<<<<< - * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) - */ - { - #ifdef WITH_THREAD - PyThreadState *_save; - Py_UNBLOCK_THREADS - __Pyx_FastGIL_Remember(); - #endif - /*try:*/ { - __pyx_t_1 = __pyx_v_b; - if ((1 == 0)) abort(); - { - #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) - #undef likely - #undef unlikely - #define likely(x) (x) - #define unlikely(x) (x) - #endif - __pyx_t_3 = (__pyx_t_1 - 0 + 1 - 1/abs(1)) / 1; - if (__pyx_t_3 > 0) - { - #ifdef _OPENMP - #pragma omp parallel private(__pyx_t_6, 
__pyx_t_7) firstprivate(__pyx_t_4, __pyx_t_5) - #endif /* _OPENMP */ - { - #ifdef _OPENMP - #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) - #endif /* _OPENMP */ - for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_3; __pyx_t_2++){ - { - __pyx_v_i = (int)(0 + 1 * __pyx_t_2); - - /* "monotonic_align/core.pyx":42 - * cdef int i - * for i in prange(b, nogil=True): - * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) # <<<<<<<<<<<<<< - */ - __pyx_t_4.data = __pyx_v_paths.data; - __pyx_t_4.memview = __pyx_v_paths.memview; - __PYX_INC_MEMVIEW(&__pyx_t_4, 0); - { - Py_ssize_t __pyx_tmp_idx = __pyx_v_i; - Py_ssize_t __pyx_tmp_stride = __pyx_v_paths.strides[0]; - __pyx_t_4.data += __pyx_tmp_idx * __pyx_tmp_stride; -} - -__pyx_t_4.shape[0] = __pyx_v_paths.shape[1]; -__pyx_t_4.strides[0] = __pyx_v_paths.strides[1]; - __pyx_t_4.suboffsets[0] = -1; - -__pyx_t_4.shape[1] = __pyx_v_paths.shape[2]; -__pyx_t_4.strides[1] = __pyx_v_paths.strides[2]; - __pyx_t_4.suboffsets[1] = -1; - -__pyx_t_5.data = __pyx_v_values.data; - __pyx_t_5.memview = __pyx_v_values.memview; - __PYX_INC_MEMVIEW(&__pyx_t_5, 0); - { - Py_ssize_t __pyx_tmp_idx = __pyx_v_i; - Py_ssize_t __pyx_tmp_stride = __pyx_v_values.strides[0]; - __pyx_t_5.data += __pyx_tmp_idx * __pyx_tmp_stride; -} - -__pyx_t_5.shape[0] = __pyx_v_values.shape[1]; -__pyx_t_5.strides[0] = __pyx_v_values.strides[1]; - __pyx_t_5.suboffsets[0] = -1; - -__pyx_t_5.shape[1] = __pyx_v_values.shape[2]; -__pyx_t_5.strides[1] = __pyx_v_values.strides[2]; - __pyx_t_5.suboffsets[1] = -1; - -__pyx_t_6 = __pyx_v_i; - __pyx_t_7 = __pyx_v_i; - __pyx_f_15monotonic_align_4core_maximum_path_each(__pyx_t_4, __pyx_t_5, (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_t_ys.data) + __pyx_t_6)) ))), (*((int *) ( /* dim=0 */ ((char *) (((int *) __pyx_v_t_xs.data) + __pyx_t_7)) ))), NULL); - __PYX_XDEC_MEMVIEW(&__pyx_t_4, 0); - __pyx_t_4.memview = NULL; - __pyx_t_4.data = NULL; - __PYX_XDEC_MEMVIEW(&__pyx_t_5, 0); - __pyx_t_5.memview = NULL; - __pyx_t_5.data = NULL; - } - } - } - } - } - #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) - #undef likely - #undef unlikely - #define likely(x) __builtin_expect(!!(x), 1) - #define unlikely(x) __builtin_expect(!!(x), 0) - #endif - } - - /* "monotonic_align/core.pyx":41 - * cdef int b = paths.shape[0] - * cdef int i - * for i in prange(b, nogil=True): # <<<<<<<<<<<<<< - * maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) - */ - /*finally:*/ { - /*normal exit:*/{ - #ifdef WITH_THREAD - __Pyx_FastGIL_Forget(); - Py_BLOCK_THREADS - #endif - goto __pyx_L5; - } - __pyx_L5:; - } - } - - /* "monotonic_align/core.pyx":38 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: # <<<<<<<<<<<<<< - * cdef int b = paths.shape[0] - * cdef int i - */ - - /* function exit code */ -} - -/* Python wrapper */ -static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static PyObject *__pyx_pw_15monotonic_align_4core_1maximum_path_c(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - __Pyx_memviewslice __pyx_v_paths = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_memviewslice __pyx_v_values = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_memviewslice __pyx_v_t_ys = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_memviewslice __pyx_v_t_xs = { 0, 0, { 0 }, { 0 }, { 0 } }; - 
int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("maximum_path_c (wrapper)", 0); - { - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_paths,&__pyx_n_s_values,&__pyx_n_s_t_ys,&__pyx_n_s_t_xs,0}; - PyObject* values[4] = {0,0,0,0}; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args; - const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); - switch (pos_args) { - case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = PyDict_Size(__pyx_kwds); - switch (pos_args) { - case 0: - if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_paths)) != 0)) kw_args--; - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_values)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 1); __PYX_ERR(0, 38, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_t_ys)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 2); __PYX_ERR(0, 38, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 3: - if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_t_xs)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, 3); __PYX_ERR(0, 38, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "maximum_path_c") < 0)) __PYX_ERR(0, 38, __pyx_L3_error) - } - } else if (PyTuple_GET_SIZE(__pyx_args) != 4) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - values[3] = PyTuple_GET_ITEM(__pyx_args, 3); - } - __pyx_v_paths = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_paths.memview)) __PYX_ERR(0, 38, __pyx_L3_error) - __pyx_v_values = __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_values.memview)) __PYX_ERR(0, 38, __pyx_L3_error) - __pyx_v_t_ys = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_t_ys.memview)) __PYX_ERR(0, 38, __pyx_L3_error) - __pyx_v_t_xs = __Pyx_PyObject_to_MemoryviewSlice_dc_int(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_t_xs.memview)) __PYX_ERR(0, 38, __pyx_L3_error) - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("maximum_path_c", 1, 4, 4, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 38, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("monotonic_align.core.maximum_path_c", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_15monotonic_align_4core_maximum_path_c(__pyx_self, __pyx_v_paths, __pyx_v_values, __pyx_v_t_ys, __pyx_v_t_xs); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject 
*__pyx_pf_15monotonic_align_4core_maximum_path_c(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_paths, __Pyx_memviewslice __pyx_v_values, __Pyx_memviewslice __pyx_v_t_ys, __Pyx_memviewslice __pyx_v_t_xs) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("maximum_path_c", 0); - __Pyx_XDECREF(__pyx_r); - if (unlikely(!__pyx_v_paths.memview)) { __Pyx_RaiseUnboundLocalError("paths"); __PYX_ERR(0, 38, __pyx_L1_error) } - if (unlikely(!__pyx_v_values.memview)) { __Pyx_RaiseUnboundLocalError("values"); __PYX_ERR(0, 38, __pyx_L1_error) } - if (unlikely(!__pyx_v_t_ys.memview)) { __Pyx_RaiseUnboundLocalError("t_ys"); __PYX_ERR(0, 38, __pyx_L1_error) } - if (unlikely(!__pyx_v_t_xs.memview)) { __Pyx_RaiseUnboundLocalError("t_xs"); __PYX_ERR(0, 38, __pyx_L1_error) } - __pyx_t_1 = __Pyx_void_to_None(__pyx_f_15monotonic_align_4core_maximum_path_c(__pyx_v_paths, __pyx_v_values, __pyx_v_t_ys, __pyx_v_t_xs, 0)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 38, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("monotonic_align.core.maximum_path_c", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __PYX_XDEC_MEMVIEW(&__pyx_v_paths, 1); - __PYX_XDEC_MEMVIEW(&__pyx_v_values, 1); - __PYX_XDEC_MEMVIEW(&__pyx_v_t_ys, 1); - __PYX_XDEC_MEMVIEW(&__pyx_v_t_xs, 1); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":122 - * cdef bint dtype_is_object - * - * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< - * mode="c", bint allocate_buffer=True): - * - */ - -/* Python wrapper */ -static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_shape = 0; - Py_ssize_t __pyx_v_itemsize; - PyObject *__pyx_v_format = 0; - PyObject *__pyx_v_mode = 0; - int __pyx_v_allocate_buffer; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); - { - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; - PyObject* values[5] = {0,0,0,0,0}; - values[3] = ((PyObject *)__pyx_n_s_c); - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args; - const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); - switch (pos_args) { - case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); - CYTHON_FALLTHROUGH; - case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = PyDict_Size(__pyx_kwds); - switch (pos_args) { - case 0: - if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = 
__Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 122, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 122, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 3: - if (kw_args > 0) { - PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode); - if (value) { values[3] = value; kw_args--; } - } - CYTHON_FALLTHROUGH; - case 4: - if (kw_args > 0) { - PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer); - if (value) { values[4] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 122, __pyx_L3_error) - } - } else { - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); - CYTHON_FALLTHROUGH; - case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); - CYTHON_FALLTHROUGH; - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_shape = ((PyObject*)values[0]); - __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 122, __pyx_L3_error) - __pyx_v_format = values[2]; - __pyx_v_mode = values[3]; - if (values[4]) { - __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 123, __pyx_L3_error) - } else { - - /* "View.MemoryView":123 - * - * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, - * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< - * - * cdef int idx - */ - __pyx_v_allocate_buffer = ((int)1); - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 122, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return -1; - __pyx_L4_argument_unpacking_done:; - if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 122, __pyx_L1_error) - if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { - PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 122, __pyx_L1_error) - } - __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); - - /* "View.MemoryView":122 - * cdef bint dtype_is_object - * - * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< - * mode="c", bint allocate_buffer=True): - * - */ - - /* function exit code */ - goto __pyx_L0; - __pyx_L1_error:; - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject 
*__pyx_v_mode, int __pyx_v_allocate_buffer) { - int __pyx_v_idx; - Py_ssize_t __pyx_v_i; - Py_ssize_t __pyx_v_dim; - PyObject **__pyx_v_p; - char __pyx_v_order; - int __pyx_r; - __Pyx_RefNannyDeclarations - Py_ssize_t __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - char *__pyx_t_7; - int __pyx_t_8; - Py_ssize_t __pyx_t_9; - PyObject *__pyx_t_10 = NULL; - Py_ssize_t __pyx_t_11; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__cinit__", 0); - __Pyx_INCREF(__pyx_v_format); - - /* "View.MemoryView":129 - * cdef PyObject **p - * - * self.ndim = len(shape) # <<<<<<<<<<<<<< - * self.itemsize = itemsize - * - */ - if (unlikely(__pyx_v_shape == Py_None)) { - PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); - __PYX_ERR(1, 129, __pyx_L1_error) - } - __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 129, __pyx_L1_error) - __pyx_v_self->ndim = ((int)__pyx_t_1); - - /* "View.MemoryView":130 - * - * self.ndim = len(shape) - * self.itemsize = itemsize # <<<<<<<<<<<<<< - * - * if not self.ndim: - */ - __pyx_v_self->itemsize = __pyx_v_itemsize; - - /* "View.MemoryView":132 - * self.itemsize = itemsize - * - * if not self.ndim: # <<<<<<<<<<<<<< - * raise ValueError("Empty shape tuple for cython.array") - * - */ - __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); - if (unlikely(__pyx_t_2)) { - - /* "View.MemoryView":133 - * - * if not self.ndim: - * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< - * - * if itemsize <= 0: - */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 133, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(1, 133, __pyx_L1_error) - - /* "View.MemoryView":132 - * self.itemsize = itemsize - * - * if not self.ndim: # <<<<<<<<<<<<<< - * raise ValueError("Empty shape tuple for cython.array") - * - */ - } - - /* "View.MemoryView":135 - * raise ValueError("Empty shape tuple for cython.array") - * - * if itemsize <= 0: # <<<<<<<<<<<<<< - * raise ValueError("itemsize <= 0 for cython.array") - * - */ - __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); - if (unlikely(__pyx_t_2)) { - - /* "View.MemoryView":136 - * - * if itemsize <= 0: - * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< - * - * if not isinstance(format, bytes): - */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(1, 136, __pyx_L1_error) - - /* "View.MemoryView":135 - * raise ValueError("Empty shape tuple for cython.array") - * - * if itemsize <= 0: # <<<<<<<<<<<<<< - * raise ValueError("itemsize <= 0 for cython.array") - * - */ - } - - /* "View.MemoryView":138 - * raise ValueError("itemsize <= 0 for cython.array") - * - * if not isinstance(format, bytes): # <<<<<<<<<<<<<< - * format = format.encode('ASCII') - * self._format = format # keep a reference to the byte string - */ - __pyx_t_2 = PyBytes_Check(__pyx_v_format); - __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); - if (__pyx_t_4) { - - /* "View.MemoryView":139 - * - * if not isinstance(format, bytes): - * format = format.encode('ASCII') # <<<<<<<<<<<<<< - 
* self._format = format # keep a reference to the byte string - * self.format = self._format - */ - __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 139, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = NULL; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { - __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); - if (likely(__pyx_t_6)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); - __Pyx_INCREF(__pyx_t_6); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_5, function); - } - } - __pyx_t_3 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII); - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 139, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3); - __pyx_t_3 = 0; - - /* "View.MemoryView":138 - * raise ValueError("itemsize <= 0 for cython.array") - * - * if not isinstance(format, bytes): # <<<<<<<<<<<<<< - * format = format.encode('ASCII') - * self._format = format # keep a reference to the byte string - */ - } - - /* "View.MemoryView":140 - * if not isinstance(format, bytes): - * format = format.encode('ASCII') - * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< - * self.format = self._format - * - */ - if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 140, __pyx_L1_error) - __pyx_t_3 = __pyx_v_format; - __Pyx_INCREF(__pyx_t_3); - __Pyx_GIVEREF(__pyx_t_3); - __Pyx_GOTREF(__pyx_v_self->_format); - __Pyx_DECREF(__pyx_v_self->_format); - __pyx_v_self->_format = ((PyObject*)__pyx_t_3); - __pyx_t_3 = 0; - - /* "View.MemoryView":141 - * format = format.encode('ASCII') - * self._format = format # keep a reference to the byte string - * self.format = self._format # <<<<<<<<<<<<<< - * - * - */ - if (unlikely(__pyx_v_self->_format == Py_None)) { - PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found"); - __PYX_ERR(1, 141, __pyx_L1_error) - } - __pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(1, 141, __pyx_L1_error) - __pyx_v_self->format = __pyx_t_7; - - /* "View.MemoryView":144 - * - * - * self._shape = PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< - * self._strides = self._shape + self.ndim - * - */ - __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); - - /* "View.MemoryView":145 - * - * self._shape = PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) - * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< - * - * if not self._shape: - */ - __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); - - /* "View.MemoryView":147 - * self._strides = self._shape + self.ndim - * - * if not self._shape: # <<<<<<<<<<<<<< - * raise MemoryError("unable to allocate shape and strides.") - * - */ - __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); - if (unlikely(__pyx_t_4)) { - - /* "View.MemoryView":148 - * - * if not self._shape: - * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 148, __pyx_L1_error) - 
__Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(1, 148, __pyx_L1_error) - - /* "View.MemoryView":147 - * self._strides = self._shape + self.ndim - * - * if not self._shape: # <<<<<<<<<<<<<< - * raise MemoryError("unable to allocate shape and strides.") - * - */ - } - - /* "View.MemoryView":151 - * - * - * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< - * if dim <= 0: - * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) - */ - __pyx_t_8 = 0; - __pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0; - for (;;) { - if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 151, __pyx_L1_error) - #else - __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 151, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - #endif - __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_dim = __pyx_t_9; - __pyx_v_idx = __pyx_t_8; - __pyx_t_8 = (__pyx_t_8 + 1); - - /* "View.MemoryView":152 - * - * for idx, dim in enumerate(shape): - * if dim <= 0: # <<<<<<<<<<<<<< - * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) - * self._shape[idx] = dim - */ - __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); - if (unlikely(__pyx_t_4)) { - - /* "View.MemoryView":153 - * for idx, dim in enumerate(shape): - * if dim <= 0: - * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<< - * self._shape[idx] = dim - * - */ - __pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 153, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_6); - PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6); - __pyx_t_5 = 0; - __pyx_t_6 = 0; - __pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 153, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 153, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_Raise(__pyx_t_10, 0, 0, 0); - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __PYX_ERR(1, 153, __pyx_L1_error) - - /* "View.MemoryView":152 - * - * for idx, dim in enumerate(shape): - * if dim <= 0: # <<<<<<<<<<<<<< - * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) - * self._shape[idx] = dim - */ - } - - /* "View.MemoryView":154 - * if dim <= 0: - * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) - * self._shape[idx] = dim # <<<<<<<<<<<<<< - * - * cdef char order - */ - (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; - - /* "View.MemoryView":151 - * - * - * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< - * if dim <= 0: - * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) - */ - } - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "View.MemoryView":157 - * - * cdef char order - * if mode == 'fortran': # <<<<<<<<<<<<<< - * order = b'F' - * self.mode = u'fortran' - */ - __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 157, __pyx_L1_error) - if (__pyx_t_4) { - - /* "View.MemoryView":158 - * cdef char order - * if mode == 'fortran': - * order = b'F' # <<<<<<<<<<<<<< - * self.mode = u'fortran' - * elif mode == 'c': - */ - __pyx_v_order = 'F'; - - /* "View.MemoryView":159 - * if mode == 'fortran': - * order = b'F' - * self.mode = u'fortran' # <<<<<<<<<<<<<< - * elif mode == 'c': - * order = b'C' - */ - __Pyx_INCREF(__pyx_n_u_fortran); - __Pyx_GIVEREF(__pyx_n_u_fortran); - __Pyx_GOTREF(__pyx_v_self->mode); - __Pyx_DECREF(__pyx_v_self->mode); - __pyx_v_self->mode = __pyx_n_u_fortran; - - /* "View.MemoryView":157 - * - * cdef char order - * if mode == 'fortran': # <<<<<<<<<<<<<< - * order = b'F' - * self.mode = u'fortran' - */ - goto __pyx_L10; - } - - /* "View.MemoryView":160 - * order = b'F' - * self.mode = u'fortran' - * elif mode == 'c': # <<<<<<<<<<<<<< - * order = b'C' - * self.mode = u'c' - */ - __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 160, __pyx_L1_error) - if (likely(__pyx_t_4)) { - - /* "View.MemoryView":161 - * self.mode = u'fortran' - * elif mode == 'c': - * order = b'C' # <<<<<<<<<<<<<< - * self.mode = u'c' - * else: - */ - __pyx_v_order = 'C'; - - /* "View.MemoryView":162 - * elif mode == 'c': - * order = b'C' - * self.mode = u'c' # <<<<<<<<<<<<<< - * else: - * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) - */ - __Pyx_INCREF(__pyx_n_u_c); - __Pyx_GIVEREF(__pyx_n_u_c); - __Pyx_GOTREF(__pyx_v_self->mode); - __Pyx_DECREF(__pyx_v_self->mode); - __pyx_v_self->mode = __pyx_n_u_c; - - /* "View.MemoryView":160 - * order = b'F' - * self.mode = u'fortran' - * elif mode == 'c': # <<<<<<<<<<<<<< - * order = b'C' - * self.mode = u'c' - */ - goto __pyx_L10; - } - - /* "View.MemoryView":164 - * self.mode = u'c' - * else: - * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< - * - * self.len = fill_contig_strides_array(self._shape, self._strides, - */ - /*else*/ { - __pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 164, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 164, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_Raise(__pyx_t_10, 0, 0, 0); - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __PYX_ERR(1, 164, __pyx_L1_error) - } - __pyx_L10:; - - /* "View.MemoryView":166 - * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) - * - * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< - * itemsize, self.ndim, order) - * - */ - __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); - - /* "View.MemoryView":169 - * itemsize, self.ndim, order) - * - * self.free_data = allocate_buffer # <<<<<<<<<<<<<< - * self.dtype_is_object = format == b'O' - * if allocate_buffer: - */ - __pyx_v_self->free_data = __pyx_v_allocate_buffer; - - /* "View.MemoryView":170 - * - * 
self.free_data = allocate_buffer - * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< - * if allocate_buffer: - * - */ - __pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 170, __pyx_L1_error) - __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 170, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __pyx_v_self->dtype_is_object = __pyx_t_4; - - /* "View.MemoryView":171 - * self.free_data = allocate_buffer - * self.dtype_is_object = format == b'O' - * if allocate_buffer: # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_4 = (__pyx_v_allocate_buffer != 0); - if (__pyx_t_4) { - - /* "View.MemoryView":174 - * - * - * self.data = malloc(self.len) # <<<<<<<<<<<<<< - * if not self.data: - * raise MemoryError("unable to allocate array data.") - */ - __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); - - /* "View.MemoryView":175 - * - * self.data = malloc(self.len) - * if not self.data: # <<<<<<<<<<<<<< - * raise MemoryError("unable to allocate array data.") - * - */ - __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); - if (unlikely(__pyx_t_4)) { - - /* "View.MemoryView":176 - * self.data = malloc(self.len) - * if not self.data: - * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< - * - * if self.dtype_is_object: - */ - __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 176, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_10); - __Pyx_Raise(__pyx_t_10, 0, 0, 0); - __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; - __PYX_ERR(1, 176, __pyx_L1_error) - - /* "View.MemoryView":175 - * - * self.data = malloc(self.len) - * if not self.data: # <<<<<<<<<<<<<< - * raise MemoryError("unable to allocate array data.") - * - */ - } - - /* "View.MemoryView":178 - * raise MemoryError("unable to allocate array data.") - * - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * p = self.data - * for i in range(self.len / itemsize): - */ - __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); - if (__pyx_t_4) { - - /* "View.MemoryView":179 - * - * if self.dtype_is_object: - * p = self.data # <<<<<<<<<<<<<< - * for i in range(self.len / itemsize): - * p[i] = Py_None - */ - __pyx_v_p = ((PyObject **)__pyx_v_self->data); - - /* "View.MemoryView":180 - * if self.dtype_is_object: - * p = self.data - * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< - * p[i] = Py_None - * Py_INCREF(Py_None) - */ - if (unlikely(__pyx_v_itemsize == 0)) { - PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); - __PYX_ERR(1, 180, __pyx_L1_error) - } - else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { - PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); - __PYX_ERR(1, 180, __pyx_L1_error) - } - __pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize); - __pyx_t_9 = __pyx_t_1; - for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) { - __pyx_v_i = __pyx_t_11; - - /* "View.MemoryView":181 - * p = self.data - * for i in range(self.len / itemsize): - * p[i] = Py_None # <<<<<<<<<<<<<< - * Py_INCREF(Py_None) - * - */ - (__pyx_v_p[__pyx_v_i]) = Py_None; - - /* "View.MemoryView":182 - * for i in range(self.len / itemsize): - * p[i] = Py_None - * Py_INCREF(Py_None) # <<<<<<<<<<<<<< - * - * @cname('getbuffer') - */ - 
Py_INCREF(Py_None); - } - - /* "View.MemoryView":178 - * raise MemoryError("unable to allocate array data.") - * - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * p = self.data - * for i in range(self.len / itemsize): - */ - } - - /* "View.MemoryView":171 - * self.free_data = allocate_buffer - * self.dtype_is_object = format == b'O' - * if allocate_buffer: # <<<<<<<<<<<<<< - * - * - */ - } - - /* "View.MemoryView":122 - * cdef bint dtype_is_object - * - * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< - * mode="c", bint allocate_buffer=True): - * - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_10); - __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_format); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":185 - * - * @cname('getbuffer') - * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< - * cdef int bufmode = -1 - * if self.mode == u"c": - */ - -/* Python wrapper */ -static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ -static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); - __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { - int __pyx_v_bufmode; - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - char *__pyx_t_4; - Py_ssize_t __pyx_t_5; - int __pyx_t_6; - Py_ssize_t *__pyx_t_7; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - if (__pyx_v_info == NULL) { - PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); - return -1; - } - __Pyx_RefNannySetupContext("__getbuffer__", 0); - __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(__pyx_v_info->obj); - - /* "View.MemoryView":186 - * @cname('getbuffer') - * def __getbuffer__(self, Py_buffer *info, int flags): - * cdef int bufmode = -1 # <<<<<<<<<<<<<< - * if self.mode == u"c": - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - */ - __pyx_v_bufmode = -1; - - /* "View.MemoryView":187 - * def __getbuffer__(self, Py_buffer *info, int flags): - * cdef int bufmode = -1 - * if self.mode == u"c": # <<<<<<<<<<<<<< - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * elif self.mode == u"fortran": - */ - __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 187, __pyx_L1_error) - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":188 - * cdef int bufmode = -1 - * if self.mode == u"c": - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< - * elif self.mode == u"fortran": - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - */ - __pyx_v_bufmode = 
(PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); - - /* "View.MemoryView":187 - * def __getbuffer__(self, Py_buffer *info, int flags): - * cdef int bufmode = -1 - * if self.mode == u"c": # <<<<<<<<<<<<<< - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * elif self.mode == u"fortran": - */ - goto __pyx_L3; - } - - /* "View.MemoryView":189 - * if self.mode == u"c": - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * elif self.mode == u"fortran": # <<<<<<<<<<<<<< - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * if not (flags & bufmode): - */ - __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 189, __pyx_L1_error) - __pyx_t_1 = (__pyx_t_2 != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":190 - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * elif self.mode == u"fortran": - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< - * if not (flags & bufmode): - * raise ValueError("Can only create a buffer that is contiguous in memory.") - */ - __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); - - /* "View.MemoryView":189 - * if self.mode == u"c": - * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * elif self.mode == u"fortran": # <<<<<<<<<<<<<< - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * if not (flags & bufmode): - */ - } - __pyx_L3:; - - /* "View.MemoryView":191 - * elif self.mode == u"fortran": - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * if not (flags & bufmode): # <<<<<<<<<<<<<< - * raise ValueError("Can only create a buffer that is contiguous in memory.") - * info.buf = self.data - */ - __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":192 - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * if not (flags & bufmode): - * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< - * info.buf = self.data - * info.len = self.len - */ - __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 192, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(1, 192, __pyx_L1_error) - - /* "View.MemoryView":191 - * elif self.mode == u"fortran": - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * if not (flags & bufmode): # <<<<<<<<<<<<<< - * raise ValueError("Can only create a buffer that is contiguous in memory.") - * info.buf = self.data - */ - } - - /* "View.MemoryView":193 - * if not (flags & bufmode): - * raise ValueError("Can only create a buffer that is contiguous in memory.") - * info.buf = self.data # <<<<<<<<<<<<<< - * info.len = self.len - * info.ndim = self.ndim - */ - __pyx_t_4 = __pyx_v_self->data; - __pyx_v_info->buf = __pyx_t_4; - - /* "View.MemoryView":194 - * raise ValueError("Can only create a buffer that is contiguous in memory.") - * info.buf = self.data - * info.len = self.len # <<<<<<<<<<<<<< - * info.ndim = self.ndim - * info.shape = self._shape - */ - __pyx_t_5 = __pyx_v_self->len; - __pyx_v_info->len = __pyx_t_5; - - /* "View.MemoryView":195 - * info.buf = self.data - * info.len = self.len - * info.ndim = self.ndim # <<<<<<<<<<<<<< - * info.shape = self._shape - * info.strides = self._strides - */ - __pyx_t_6 = __pyx_v_self->ndim; - __pyx_v_info->ndim = __pyx_t_6; - - /* "View.MemoryView":196 - * info.len = self.len - * info.ndim = self.ndim - * 
info.shape = self._shape # <<<<<<<<<<<<<< - * info.strides = self._strides - * info.suboffsets = NULL - */ - __pyx_t_7 = __pyx_v_self->_shape; - __pyx_v_info->shape = __pyx_t_7; - - /* "View.MemoryView":197 - * info.ndim = self.ndim - * info.shape = self._shape - * info.strides = self._strides # <<<<<<<<<<<<<< - * info.suboffsets = NULL - * info.itemsize = self.itemsize - */ - __pyx_t_7 = __pyx_v_self->_strides; - __pyx_v_info->strides = __pyx_t_7; - - /* "View.MemoryView":198 - * info.shape = self._shape - * info.strides = self._strides - * info.suboffsets = NULL # <<<<<<<<<<<<<< - * info.itemsize = self.itemsize - * info.readonly = 0 - */ - __pyx_v_info->suboffsets = NULL; - - /* "View.MemoryView":199 - * info.strides = self._strides - * info.suboffsets = NULL - * info.itemsize = self.itemsize # <<<<<<<<<<<<<< - * info.readonly = 0 - * - */ - __pyx_t_5 = __pyx_v_self->itemsize; - __pyx_v_info->itemsize = __pyx_t_5; - - /* "View.MemoryView":200 - * info.suboffsets = NULL - * info.itemsize = self.itemsize - * info.readonly = 0 # <<<<<<<<<<<<<< - * - * if flags & PyBUF_FORMAT: - */ - __pyx_v_info->readonly = 0; - - /* "View.MemoryView":202 - * info.readonly = 0 - * - * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< - * info.format = self.format - * else: - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":203 - * - * if flags & PyBUF_FORMAT: - * info.format = self.format # <<<<<<<<<<<<<< - * else: - * info.format = NULL - */ - __pyx_t_4 = __pyx_v_self->format; - __pyx_v_info->format = __pyx_t_4; - - /* "View.MemoryView":202 - * info.readonly = 0 - * - * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< - * info.format = self.format - * else: - */ - goto __pyx_L5; - } - - /* "View.MemoryView":205 - * info.format = self.format - * else: - * info.format = NULL # <<<<<<<<<<<<<< - * - * info.obj = self - */ - /*else*/ { - __pyx_v_info->format = NULL; - } - __pyx_L5:; - - /* "View.MemoryView":207 - * info.format = NULL - * - * info.obj = self # <<<<<<<<<<<<<< - * - * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") - */ - __Pyx_INCREF(((PyObject *)__pyx_v_self)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); - __pyx_v_info->obj = ((PyObject *)__pyx_v_self); - - /* "View.MemoryView":185 - * - * @cname('getbuffer') - * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< - * cdef int bufmode = -1 - * if self.mode == u"c": - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - if (__pyx_v_info->obj != NULL) { - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; - } - goto __pyx_L2; - __pyx_L0:; - if (__pyx_v_info->obj == Py_None) { - __Pyx_GOTREF(__pyx_v_info->obj); - __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; - } - __pyx_L2:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":211 - * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") - * - * def __dealloc__(array self): # <<<<<<<<<<<<<< - * if self.callback_free_data != NULL: - * self.callback_free_data(self.data) - */ - -/* Python wrapper */ -static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ -static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { - __Pyx_RefNannyDeclarations - 
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); - __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("__dealloc__", 0); - - /* "View.MemoryView":212 - * - * def __dealloc__(array self): - * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< - * self.callback_free_data(self.data) - * elif self.free_data: - */ - __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":213 - * def __dealloc__(array self): - * if self.callback_free_data != NULL: - * self.callback_free_data(self.data) # <<<<<<<<<<<<<< - * elif self.free_data: - * if self.dtype_is_object: - */ - __pyx_v_self->callback_free_data(__pyx_v_self->data); - - /* "View.MemoryView":212 - * - * def __dealloc__(array self): - * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< - * self.callback_free_data(self.data) - * elif self.free_data: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":214 - * if self.callback_free_data != NULL: - * self.callback_free_data(self.data) - * elif self.free_data: # <<<<<<<<<<<<<< - * if self.dtype_is_object: - * refcount_objects_in_slice(self.data, self._shape, - */ - __pyx_t_1 = (__pyx_v_self->free_data != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":215 - * self.callback_free_data(self.data) - * elif self.free_data: - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * refcount_objects_in_slice(self.data, self._shape, - * self._strides, self.ndim, False) - */ - __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":216 - * elif self.free_data: - * if self.dtype_is_object: - * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< - * self._strides, self.ndim, False) - * free(self.data) - */ - __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); - - /* "View.MemoryView":215 - * self.callback_free_data(self.data) - * elif self.free_data: - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * refcount_objects_in_slice(self.data, self._shape, - * self._strides, self.ndim, False) - */ - } - - /* "View.MemoryView":218 - * refcount_objects_in_slice(self.data, self._shape, - * self._strides, self.ndim, False) - * free(self.data) # <<<<<<<<<<<<<< - * PyObject_Free(self._shape) - * - */ - free(__pyx_v_self->data); - - /* "View.MemoryView":214 - * if self.callback_free_data != NULL: - * self.callback_free_data(self.data) - * elif self.free_data: # <<<<<<<<<<<<<< - * if self.dtype_is_object: - * refcount_objects_in_slice(self.data, self._shape, - */ - } - __pyx_L3:; - - /* "View.MemoryView":219 - * self._strides, self.ndim, False) - * free(self.data) - * PyObject_Free(self._shape) # <<<<<<<<<<<<<< - * - * @property - */ - PyObject_Free(__pyx_v_self->_shape); - - /* "View.MemoryView":211 - * __pyx_getbuffer = capsule( &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") - * - * def __dealloc__(array self): # <<<<<<<<<<<<<< - * if self.callback_free_data != NULL: - * self.callback_free_data(self.data) - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -/* "View.MemoryView":222 - * - * @property - * def memview(self): # <<<<<<<<<<<<<< - * return self.get_memview() - * - */ - -/* Python wrapper */ -static 
PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":223 - * @property - * def memview(self): - * return self.get_memview() # <<<<<<<<<<<<<< - * - * @cname('get_memview') - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 223, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "View.MemoryView":222 - * - * @property - * def memview(self): # <<<<<<<<<<<<<< - * return self.get_memview() - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":226 - * - * @cname('get_memview') - * cdef get_memview(self): # <<<<<<<<<<<<<< - * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE - * return memoryview(self, flags, self.dtype_is_object) - */ - -static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { - int __pyx_v_flags; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("get_memview", 0); - - /* "View.MemoryView":227 - * @cname('get_memview') - * cdef get_memview(self): - * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< - * return memoryview(self, flags, self.dtype_is_object) - * - */ - __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); - - /* "View.MemoryView":228 - * cdef get_memview(self): - * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE - * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< - * - * def __len__(self): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 228, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 228, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(((PyObject *)__pyx_v_self)); - __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); - PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); - 
__pyx_t_1 = 0; - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 228, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":226 - * - * @cname('get_memview') - * cdef get_memview(self): # <<<<<<<<<<<<<< - * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE - * return memoryview(self, flags, self.dtype_is_object) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":230 - * return memoryview(self, flags, self.dtype_is_object) - * - * def __len__(self): # <<<<<<<<<<<<<< - * return self._shape[0] - * - */ - -/* Python wrapper */ -static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/ -static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) { - Py_ssize_t __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); - __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) { - Py_ssize_t __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__len__", 0); - - /* "View.MemoryView":231 - * - * def __len__(self): - * return self._shape[0] # <<<<<<<<<<<<<< - * - * def __getattr__(self, attr): - */ - __pyx_r = (__pyx_v_self->_shape[0]); - goto __pyx_L0; - - /* "View.MemoryView":230 - * return memoryview(self, flags, self.dtype_is_object) - * - * def __len__(self): # <<<<<<<<<<<<<< - * return self._shape[0] - * - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":233 - * return self._shape[0] - * - * def __getattr__(self, attr): # <<<<<<<<<<<<<< - * return getattr(self.memview, attr) - * - */ - -/* Python wrapper */ -static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ -static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); - __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__getattr__", 0); - - /* "View.MemoryView":234 - * - * def __getattr__(self, attr): - * return getattr(self.memview, attr) # <<<<<<<<<<<<<< - * - * def __getitem__(self, item): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), 
__pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 234, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 234, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":233 - * return self._shape[0] - * - * def __getattr__(self, attr): # <<<<<<<<<<<<<< - * return getattr(self.memview, attr) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":236 - * return getattr(self.memview, attr) - * - * def __getitem__(self, item): # <<<<<<<<<<<<<< - * return self.memview[item] - * - */ - -/* Python wrapper */ -static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ -static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); - __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__getitem__", 0); - - /* "View.MemoryView":237 - * - * def __getitem__(self, item): - * return self.memview[item] # <<<<<<<<<<<<<< - * - * def __setitem__(self, item, value): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 237, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 237, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":236 - * return getattr(self.memview, attr) - * - * def __getitem__(self, item): # <<<<<<<<<<<<<< - * return self.memview[item] - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":239 - * return self.memview[item] - * - * def __setitem__(self, item, value): # <<<<<<<<<<<<<< - * self.memview[item] = value - * - */ - -/* Python wrapper */ -static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ -static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); - 
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { - int __pyx_r; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setitem__", 0); - - /* "View.MemoryView":240 - * - * def __setitem__(self, item, value): - * self.memview[item] = value # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 240, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 240, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "View.MemoryView":239 - * return self.memview[item] - * - * def __setitem__(self, item, value): # <<<<<<<<<<<<<< - * self.memview[item] = value - * - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); - __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__reduce_cython__", 0); - - /* "(tree fragment)":2 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(1, 2, __pyx_L1_error) - - /* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", 
__pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ -static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); - __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setstate_cython__", 0); - - /* "(tree fragment)":4 - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(1, 4, __pyx_L1_error) - - /* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":244 - * - * @cname("__pyx_array_new") - * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< - * char *mode, char *buf): - * cdef array result - */ - -static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) { - struct __pyx_array_obj *__pyx_v_result = 0; - struct __pyx_array_obj *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("array_cwrapper", 0); - - /* "View.MemoryView":248 - * cdef array result - * - * if buf == NULL: # <<<<<<<<<<<<<< - * result = array(shape, itemsize, format, mode.decode('ASCII')) - * else: - */ - __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":249 - * - * if buf == 
NULL: - * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< - * else: - * result = array(shape, itemsize, format, mode.decode('ASCII'), - */ - __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_INCREF(__pyx_v_shape); - __Pyx_GIVEREF(__pyx_v_shape); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); - __pyx_t_2 = 0; - __pyx_t_3 = 0; - __pyx_t_4 = 0; - __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); - __pyx_t_4 = 0; - - /* "View.MemoryView":248 - * cdef array result - * - * if buf == NULL: # <<<<<<<<<<<<<< - * result = array(shape, itemsize, format, mode.decode('ASCII')) - * else: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":251 - * result = array(shape, itemsize, format, mode.decode('ASCII')) - * else: - * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< - * allocate_buffer=False) - * result.data = buf - */ - /*else*/ { - __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 251, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 251, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 251, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(__pyx_v_shape); - __Pyx_GIVEREF(__pyx_v_shape); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); - __pyx_t_4 = 0; - __pyx_t_5 = 0; - __pyx_t_3 = 0; - - /* "View.MemoryView":252 - * else: - * result = array(shape, itemsize, format, mode.decode('ASCII'), - * allocate_buffer=False) # <<<<<<<<<<<<<< - * result.data = buf - * - */ - __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 252, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 252, __pyx_L1_error) - - /* "View.MemoryView":251 - * result = array(shape, itemsize, format, mode.decode('ASCII')) - * else: - * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< - * allocate_buffer=False) - * result.data = buf - */ - __pyx_t_5 = 
__Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 251, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); - __pyx_t_5 = 0; - - /* "View.MemoryView":253 - * result = array(shape, itemsize, format, mode.decode('ASCII'), - * allocate_buffer=False) - * result.data = buf # <<<<<<<<<<<<<< - * - * return result - */ - __pyx_v_result->data = __pyx_v_buf; - } - __pyx_L3:; - - /* "View.MemoryView":255 - * result.data = buf - * - * return result # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(((PyObject *)__pyx_r)); - __Pyx_INCREF(((PyObject *)__pyx_v_result)); - __pyx_r = __pyx_v_result; - goto __pyx_L0; - - /* "View.MemoryView":244 - * - * @cname("__pyx_array_new") - * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< - * char *mode, char *buf): - * cdef array result - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_result); - __Pyx_XGIVEREF((PyObject *)__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":281 - * cdef class Enum(object): - * cdef object name - * def __init__(self, name): # <<<<<<<<<<<<<< - * self.name = name - * def __repr__(self): - */ - -/* Python wrapper */ -static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_name = 0; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); - { - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; - PyObject* values[1] = {0}; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args; - const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); - switch (pos_args) { - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = PyDict_Size(__pyx_kwds); - switch (pos_args) { - case 0: - if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; - else goto __pyx_L5_argtuple_error; - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 281, __pyx_L3_error) - } - } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - } - __pyx_v_name = values[0]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 281, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return -1; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); - - /* function 
exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__init__", 0); - - /* "View.MemoryView":282 - * cdef object name - * def __init__(self, name): - * self.name = name # <<<<<<<<<<<<<< - * def __repr__(self): - * return self.name - */ - __Pyx_INCREF(__pyx_v_name); - __Pyx_GIVEREF(__pyx_v_name); - __Pyx_GOTREF(__pyx_v_self->name); - __Pyx_DECREF(__pyx_v_self->name); - __pyx_v_self->name = __pyx_v_name; - - /* "View.MemoryView":281 - * cdef class Enum(object): - * cdef object name - * def __init__(self, name): # <<<<<<<<<<<<<< - * self.name = name - * def __repr__(self): - */ - - /* function exit code */ - __pyx_r = 0; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":283 - * def __init__(self, name): - * self.name = name - * def __repr__(self): # <<<<<<<<<<<<<< - * return self.name - * - */ - -/* Python wrapper */ -static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); - __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__repr__", 0); - - /* "View.MemoryView":284 - * self.name = name - * def __repr__(self): - * return self.name # <<<<<<<<<<<<<< - * - * cdef generic = Enum("") - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_self->name); - __pyx_r = __pyx_v_self->name; - goto __pyx_L0; - - /* "View.MemoryView":283 - * def __init__(self, name): - * self.name = name - * def __repr__(self): # <<<<<<<<<<<<<< - * return self.name - * - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * cdef tuple state - * cdef object _dict - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); - __pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { - PyObject *__pyx_v_state = 0; - PyObject *__pyx_v__dict = 0; - int __pyx_v_use_setstate; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - int __pyx_t_3; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__reduce_cython__", 
0); - - /* "(tree fragment)":5 - * cdef object _dict - * cdef bint use_setstate - * state = (self.name,) # <<<<<<<<<<<<<< - * _dict = getattr(self, '__dict__', None) - * if _dict is not None: - */ - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(__pyx_v_self->name); - __Pyx_GIVEREF(__pyx_v_self->name); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name); - __pyx_v_state = ((PyObject*)__pyx_t_1); - __pyx_t_1 = 0; - - /* "(tree fragment)":6 - * cdef bint use_setstate - * state = (self.name,) - * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< - * if _dict is not None: - * state += (_dict,) - */ - __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_v__dict = __pyx_t_1; - __pyx_t_1 = 0; - - /* "(tree fragment)":7 - * state = (self.name,) - * _dict = getattr(self, '__dict__', None) - * if _dict is not None: # <<<<<<<<<<<<<< - * state += (_dict,) - * use_setstate = True - */ - __pyx_t_2 = (__pyx_v__dict != Py_None); - __pyx_t_3 = (__pyx_t_2 != 0); - if (__pyx_t_3) { - - /* "(tree fragment)":8 - * _dict = getattr(self, '__dict__', None) - * if _dict is not None: - * state += (_dict,) # <<<<<<<<<<<<<< - * use_setstate = True - * else: - */ - __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(__pyx_v__dict); - __Pyx_GIVEREF(__pyx_v__dict); - PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); - __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4)); - __pyx_t_4 = 0; - - /* "(tree fragment)":9 - * if _dict is not None: - * state += (_dict,) - * use_setstate = True # <<<<<<<<<<<<<< - * else: - * use_setstate = self.name is not None - */ - __pyx_v_use_setstate = 1; - - /* "(tree fragment)":7 - * state = (self.name,) - * _dict = getattr(self, '__dict__', None) - * if _dict is not None: # <<<<<<<<<<<<<< - * state += (_dict,) - * use_setstate = True - */ - goto __pyx_L3; - } - - /* "(tree fragment)":11 - * use_setstate = True - * else: - * use_setstate = self.name is not None # <<<<<<<<<<<<<< - * if use_setstate: - * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state - */ - /*else*/ { - __pyx_t_3 = (__pyx_v_self->name != Py_None); - __pyx_v_use_setstate = __pyx_t_3; - } - __pyx_L3:; - - /* "(tree fragment)":12 - * else: - * use_setstate = self.name is not None - * if use_setstate: # <<<<<<<<<<<<<< - * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state - * else: - */ - __pyx_t_3 = (__pyx_v_use_setstate != 0); - if (__pyx_t_3) { - - /* "(tree fragment)":13 - * use_setstate = self.name is not None - * if use_setstate: - * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<< - * else: - * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); - __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); - 
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); - __Pyx_INCREF(__pyx_int_184977713); - __Pyx_GIVEREF(__pyx_int_184977713); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); - __Pyx_INCREF(Py_None); - __Pyx_GIVEREF(Py_None); - PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); - __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); - __Pyx_INCREF(__pyx_v_state); - __Pyx_GIVEREF(__pyx_v_state); - PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state); - __pyx_t_4 = 0; - __pyx_t_1 = 0; - __pyx_r = __pyx_t_5; - __pyx_t_5 = 0; - goto __pyx_L0; - - /* "(tree fragment)":12 - * else: - * use_setstate = self.name is not None - * if use_setstate: # <<<<<<<<<<<<<< - * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state - * else: - */ - } - - /* "(tree fragment)":15 - * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state - * else: - * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * __pyx_unpickle_Enum__set_state(self, __pyx_state) - */ - /*else*/ { - __Pyx_XDECREF(__pyx_r); - __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); - __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); - PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); - __Pyx_INCREF(__pyx_int_184977713); - __Pyx_GIVEREF(__pyx_int_184977713); - PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); - __Pyx_INCREF(__pyx_v_state); - __Pyx_GIVEREF(__pyx_v_state); - PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); - __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_GIVEREF(__pyx_t_5); - PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); - __pyx_t_5 = 0; - __pyx_t_1 = 0; - __pyx_r = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L0; - } - - /* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * cdef tuple state - * cdef object _dict - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_state); - __Pyx_XDECREF(__pyx_v__dict); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":16 - * else: - * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * __pyx_unpickle_Enum__set_state(self, __pyx_state) - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ -static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setstate_cython__ 
(wrapper)", 0); - __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setstate_cython__", 0); - - /* "(tree fragment)":17 - * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) - * def __setstate_cython__(self, __pyx_state): - * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<< - */ - if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error) - __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "(tree fragment)":16 - * else: - * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * __pyx_unpickle_Enum__set_state(self, __pyx_state) - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":298 - * - * @cname('__pyx_align_pointer') - * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< - * "Align pointer memory on a given boundary" - * cdef Py_intptr_t aligned_p = memory - */ - -static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { - Py_intptr_t __pyx_v_aligned_p; - size_t __pyx_v_offset; - void *__pyx_r; - int __pyx_t_1; - - /* "View.MemoryView":300 - * cdef void *align_pointer(void *memory, size_t alignment) nogil: - * "Align pointer memory on a given boundary" - * cdef Py_intptr_t aligned_p = memory # <<<<<<<<<<<<<< - * cdef size_t offset - * - */ - __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); - - /* "View.MemoryView":304 - * - * with cython.cdivision(True): - * offset = aligned_p % alignment # <<<<<<<<<<<<<< - * - * if offset > 0: - */ - __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); - - /* "View.MemoryView":306 - * offset = aligned_p % alignment - * - * if offset > 0: # <<<<<<<<<<<<<< - * aligned_p += alignment - offset - * - */ - __pyx_t_1 = ((__pyx_v_offset > 0) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":307 - * - * if offset > 0: - * aligned_p += alignment - offset # <<<<<<<<<<<<<< - * - * return aligned_p - */ - __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); - - /* "View.MemoryView":306 - * offset = aligned_p % alignment - * - * if offset > 0: # <<<<<<<<<<<<<< - * aligned_p += alignment - offset - * - */ - } - - /* "View.MemoryView":309 - * aligned_p += alignment - offset - * - * return aligned_p # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = ((void *)__pyx_v_aligned_p); - goto __pyx_L0; - 
- /* "View.MemoryView":298 - * - * @cname('__pyx_align_pointer') - * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< - * "Align pointer memory on a given boundary" - * cdef Py_intptr_t aligned_p = memory - */ - - /* function exit code */ - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":345 - * cdef __Pyx_TypeInfo *typeinfo - * - * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< - * self.obj = obj - * self.flags = flags - */ - -/* Python wrapper */ -static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v_obj = 0; - int __pyx_v_flags; - int __pyx_v_dtype_is_object; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); - { - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; - PyObject* values[3] = {0,0,0}; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args; - const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); - switch (pos_args) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = PyDict_Size(__pyx_kwds); - switch (pos_args) { - case 0: - if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 345, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (kw_args > 0) { - PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object); - if (value) { values[2] = value; kw_args--; } - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 345, __pyx_L3_error) - } - } else { - switch (PyTuple_GET_SIZE(__pyx_args)) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - break; - default: goto __pyx_L5_argtuple_error; - } - } - __pyx_v_obj = values[0]; - __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) - if (values[2]) { - __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) - } else { - __pyx_v_dtype_is_object = ((int)0); - } - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 345, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return -1; - __pyx_L4_argument_unpacking_done:; - __pyx_r = 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__cinit__", 0); - - /* "View.MemoryView":346 - * - * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): - * self.obj = obj # <<<<<<<<<<<<<< - * self.flags = flags - * if type(self) is memoryview or obj is not None: - */ - __Pyx_INCREF(__pyx_v_obj); - __Pyx_GIVEREF(__pyx_v_obj); - __Pyx_GOTREF(__pyx_v_self->obj); - __Pyx_DECREF(__pyx_v_self->obj); - __pyx_v_self->obj = __pyx_v_obj; - - /* "View.MemoryView":347 - * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): - * self.obj = obj - * self.flags = flags # <<<<<<<<<<<<<< - * if type(self) is memoryview or obj is not None: - * __Pyx_GetBuffer(obj, &self.view, flags) - */ - __pyx_v_self->flags = __pyx_v_flags; - - /* "View.MemoryView":348 - * self.obj = obj - * self.flags = flags - * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< - * __Pyx_GetBuffer(obj, &self.view, flags) - * if <PyObject *> self.view.obj == NULL: - */ - __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); - __pyx_t_3 = (__pyx_t_2 != 0); - if (!__pyx_t_3) { - } else { - __pyx_t_1 = __pyx_t_3; - goto __pyx_L4_bool_binop_done; - } - __pyx_t_3 = (__pyx_v_obj != Py_None); - __pyx_t_2 = (__pyx_t_3 != 0); - __pyx_t_1 = __pyx_t_2; - __pyx_L4_bool_binop_done:; - if (__pyx_t_1) { - - /* "View.MemoryView":349 - * self.flags = flags - * if type(self) is memoryview or obj is not None: - * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< - * if <PyObject *> self.view.obj == NULL: - * (<__pyx_buffer *> &self.view).obj = Py_None - */ - __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 349, __pyx_L1_error) - - /* "View.MemoryView":350 - * if type(self) is memoryview or obj is not None: - * __Pyx_GetBuffer(obj, &self.view, flags) - * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< - * (<__pyx_buffer *> &self.view).obj = Py_None - * Py_INCREF(Py_None) - */ - __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":351 - * __Pyx_GetBuffer(obj, &self.view, flags) - * if <PyObject *> self.view.obj == NULL: - * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< - * Py_INCREF(Py_None) - * - */ - ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; - - /* "View.MemoryView":352 - * if <PyObject *> self.view.obj == NULL: - * (<__pyx_buffer *> &self.view).obj = Py_None - * Py_INCREF(Py_None) # <<<<<<<<<<<<<< - * - * global __pyx_memoryview_thread_locks_used - */ - Py_INCREF(Py_None); - - /* "View.MemoryView":350 - * if type(self) is memoryview or obj is not None: - * __Pyx_GetBuffer(obj, &self.view, flags) - * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< - * (<__pyx_buffer *> &self.view).obj = Py_None - * Py_INCREF(Py_None) - */ - } - - /* "View.MemoryView":348 - * self.obj = obj - * self.flags = flags - * 
if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< - * __Pyx_GetBuffer(obj, &self.view, flags) - * if <PyObject *> self.view.obj == NULL: - */ - } - - /* "View.MemoryView":355 - * - * global __pyx_memoryview_thread_locks_used - * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] - * __pyx_memoryview_thread_locks_used += 1 - */ - __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":356 - * global __pyx_memoryview_thread_locks_used - * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks_used += 1 - * if self.lock is NULL: - */ - __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); - - /* "View.MemoryView":357 - * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] - * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< - * if self.lock is NULL: - * self.lock = PyThread_allocate_lock() - */ - __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); - - /* "View.MemoryView":355 - * - * global __pyx_memoryview_thread_locks_used - * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] - * __pyx_memoryview_thread_locks_used += 1 - */ - } - - /* "View.MemoryView":358 - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] - * __pyx_memoryview_thread_locks_used += 1 - * if self.lock is NULL: # <<<<<<<<<<<<<< - * self.lock = PyThread_allocate_lock() - * if self.lock is NULL: - */ - __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":359 - * __pyx_memoryview_thread_locks_used += 1 - * if self.lock is NULL: - * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< - * if self.lock is NULL: - * raise MemoryError - */ - __pyx_v_self->lock = PyThread_allocate_lock(); - - /* "View.MemoryView":360 - * if self.lock is NULL: - * self.lock = PyThread_allocate_lock() - * if self.lock is NULL: # <<<<<<<<<<<<<< - * raise MemoryError - * - */ - __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":361 - * self.lock = PyThread_allocate_lock() - * if self.lock is NULL: - * raise MemoryError # <<<<<<<<<<<<<< - * - * if flags & PyBUF_FORMAT: - */ - PyErr_NoMemory(); __PYX_ERR(1, 361, __pyx_L1_error) - - /* "View.MemoryView":360 - * if self.lock is NULL: - * self.lock = PyThread_allocate_lock() - * if self.lock is NULL: # <<<<<<<<<<<<<< - * raise MemoryError - * - */ - } - - /* "View.MemoryView":358 - * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] - * __pyx_memoryview_thread_locks_used += 1 - * if self.lock is NULL: # <<<<<<<<<<<<<< - * self.lock = PyThread_allocate_lock() - * if self.lock is NULL: - */ - } - - /* "View.MemoryView":363 - * raise MemoryError - * - * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< - * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') - * else: - */ - __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":364 - * - * if flags & PyBUF_FORMAT: - * self.dtype_is_object = (self.view.format[0] == 
b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< - * else: - * self.dtype_is_object = dtype_is_object - */ - __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); - if (__pyx_t_2) { - } else { - __pyx_t_1 = __pyx_t_2; - goto __pyx_L11_bool_binop_done; - } - __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); - __pyx_t_1 = __pyx_t_2; - __pyx_L11_bool_binop_done:; - __pyx_v_self->dtype_is_object = __pyx_t_1; - - /* "View.MemoryView":363 - * raise MemoryError - * - * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< - * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') - * else: - */ - goto __pyx_L10; - } - - /* "View.MemoryView":366 - * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') - * else: - * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< - * - * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( - */ - /*else*/ { - __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; - } - __pyx_L10:; - - /* "View.MemoryView":368 - * self.dtype_is_object = dtype_is_object - * - * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< - * &self.acquisition_count[0], sizeof(__pyx_atomic_int)) - * self.typeinfo = NULL - */ - __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); - - /* "View.MemoryView":370 - * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( - * &self.acquisition_count[0], sizeof(__pyx_atomic_int)) - * self.typeinfo = NULL # <<<<<<<<<<<<<< - * - * def __dealloc__(memoryview self): - */ - __pyx_v_self->typeinfo = NULL; - - /* "View.MemoryView":345 - * cdef __Pyx_TypeInfo *typeinfo - * - * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< - * self.obj = obj - * self.flags = flags - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":372 - * self.typeinfo = NULL - * - * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< - * if self.obj is not None: - * __Pyx_ReleaseBuffer(&self.view) - */ - -/* Python wrapper */ -static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ -static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); - __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { - int __pyx_v_i; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - int __pyx_t_5; - PyThread_type_lock __pyx_t_6; - PyThread_type_lock __pyx_t_7; - __Pyx_RefNannySetupContext("__dealloc__", 0); - - /* "View.MemoryView":373 - * - * def __dealloc__(memoryview self): - * if self.obj is not None: # <<<<<<<<<<<<<< - * __Pyx_ReleaseBuffer(&self.view) - * elif (<__pyx_buffer *> &self.view).obj == Py_None: - */ - __pyx_t_1 = (__pyx_v_self->obj != Py_None); - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* 
"View.MemoryView":374 - * def __dealloc__(memoryview self): - * if self.obj is not None: - * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< - * elif (<__pyx_buffer *> &self.view).obj == Py_None: - * - */ - __Pyx_ReleaseBuffer((&__pyx_v_self->view)); - - /* "View.MemoryView":373 - * - * def __dealloc__(memoryview self): - * if self.obj is not None: # <<<<<<<<<<<<<< - * __Pyx_ReleaseBuffer(&self.view) - * elif (<__pyx_buffer *> &self.view).obj == Py_None: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":375 - * if self.obj is not None: - * __Pyx_ReleaseBuffer(&self.view) - * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< - * - * (<__pyx_buffer *> &self.view).obj = NULL - */ - __pyx_t_2 = ((((Py_buffer *)(&__pyx_v_self->view))->obj == Py_None) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":377 - * elif (<__pyx_buffer *> &self.view).obj == Py_None: - * - * (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<< - * Py_DECREF(Py_None) - * - */ - ((Py_buffer *)(&__pyx_v_self->view))->obj = NULL; - - /* "View.MemoryView":378 - * - * (<__pyx_buffer *> &self.view).obj = NULL - * Py_DECREF(Py_None) # <<<<<<<<<<<<<< - * - * cdef int i - */ - Py_DECREF(Py_None); - - /* "View.MemoryView":375 - * if self.obj is not None: - * __Pyx_ReleaseBuffer(&self.view) - * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< - * - * (<__pyx_buffer *> &self.view).obj = NULL - */ - } - __pyx_L3:; - - /* "View.MemoryView":382 - * cdef int i - * global __pyx_memoryview_thread_locks_used - * if self.lock != NULL: # <<<<<<<<<<<<<< - * for i in range(__pyx_memoryview_thread_locks_used): - * if __pyx_memoryview_thread_locks[i] is self.lock: - */ - __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":383 - * global __pyx_memoryview_thread_locks_used - * if self.lock != NULL: - * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< - * if __pyx_memoryview_thread_locks[i] is self.lock: - * __pyx_memoryview_thread_locks_used -= 1 - */ - __pyx_t_3 = __pyx_memoryview_thread_locks_used; - __pyx_t_4 = __pyx_t_3; - for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { - __pyx_v_i = __pyx_t_5; - - /* "View.MemoryView":384 - * if self.lock != NULL: - * for i in range(__pyx_memoryview_thread_locks_used): - * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks_used -= 1 - * if i != __pyx_memoryview_thread_locks_used: - */ - __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":385 - * for i in range(__pyx_memoryview_thread_locks_used): - * if __pyx_memoryview_thread_locks[i] is self.lock: - * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< - * if i != __pyx_memoryview_thread_locks_used: - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( - */ - __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1); - - /* "View.MemoryView":386 - * if __pyx_memoryview_thread_locks[i] is self.lock: - * __pyx_memoryview_thread_locks_used -= 1 - * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( - * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) - */ - __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":388 - 
* if i != __pyx_memoryview_thread_locks_used: - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( - * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< - * break - * else: - */ - __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); - __pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]); - - /* "View.MemoryView":387 - * __pyx_memoryview_thread_locks_used -= 1 - * if i != __pyx_memoryview_thread_locks_used: - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) - * break - */ - (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6; - (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7; - - /* "View.MemoryView":386 - * if __pyx_memoryview_thread_locks[i] is self.lock: - * __pyx_memoryview_thread_locks_used -= 1 - * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( - * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) - */ - } - - /* "View.MemoryView":389 - * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( - * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) - * break # <<<<<<<<<<<<<< - * else: - * PyThread_free_lock(self.lock) - */ - goto __pyx_L6_break; - - /* "View.MemoryView":384 - * if self.lock != NULL: - * for i in range(__pyx_memoryview_thread_locks_used): - * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< - * __pyx_memoryview_thread_locks_used -= 1 - * if i != __pyx_memoryview_thread_locks_used: - */ - } - } - /*else*/ { - - /* "View.MemoryView":391 - * break - * else: - * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< - * - * cdef char *get_item_pointer(memoryview self, object index) except NULL: - */ - PyThread_free_lock(__pyx_v_self->lock); - } - __pyx_L6_break:; - - /* "View.MemoryView":382 - * cdef int i - * global __pyx_memoryview_thread_locks_used - * if self.lock != NULL: # <<<<<<<<<<<<<< - * for i in range(__pyx_memoryview_thread_locks_used): - * if __pyx_memoryview_thread_locks[i] is self.lock: - */ - } - - /* "View.MemoryView":372 - * self.typeinfo = NULL - * - * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< - * if self.obj is not None: - * __Pyx_ReleaseBuffer(&self.view) - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -/* "View.MemoryView":393 - * PyThread_free_lock(self.lock) - * - * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< - * cdef Py_ssize_t dim - * cdef char *itemp = <char *> self.view.buf - */ - -static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { - Py_ssize_t __pyx_v_dim; - char *__pyx_v_itemp; - PyObject *__pyx_v_idx = NULL; - char *__pyx_r; - __Pyx_RefNannyDeclarations - Py_ssize_t __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - Py_ssize_t __pyx_t_3; - PyObject *(*__pyx_t_4)(PyObject *); - PyObject *__pyx_t_5 = NULL; - Py_ssize_t __pyx_t_6; - char *__pyx_t_7; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("get_item_pointer", 0); 
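/* get_item_pointer below folds a tuple of per-dimension indices into a byte
   offset by delegating each step to pybuffer_index, which also handles
   negative indices, bounds checks and indirect (suboffset) dimensions. For
   the plain direct strided case, the arithmetic it performs reduces to
   roughly this sketch (item_pointer_direct is an illustrative name only,
   not part of the generated file):

       static char *item_pointer_direct(const Py_buffer *view,
                                        const Py_ssize_t *index) {
           char *itemp = (char *)view->buf;
           for (int dim = 0; dim < view->ndim; dim++)
               itemp += index[dim] * view->strides[dim];  // stride walk
           return itemp;
       }
*/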
- - /* "View.MemoryView":395 - * cdef char *get_item_pointer(memoryview self, object index) except NULL: - * cdef Py_ssize_t dim - * cdef char *itemp = self.view.buf # <<<<<<<<<<<<<< - * - * for dim, idx in enumerate(index): - */ - __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); - - /* "View.MemoryView":397 - * cdef char *itemp = self.view.buf - * - * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< - * itemp = pybuffer_index(&self.view, itemp, idx, dim) - * - */ - __pyx_t_1 = 0; - if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { - __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; - __pyx_t_4 = NULL; - } else { - __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 397, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 397, __pyx_L1_error) - } - for (;;) { - if (likely(!__pyx_t_4)) { - if (likely(PyList_CheckExact(__pyx_t_2))) { - if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) - #else - __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - #endif - } else { - if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 397, __pyx_L1_error) - #else - __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 397, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - #endif - } - } else { - __pyx_t_5 = __pyx_t_4(__pyx_t_2); - if (unlikely(!__pyx_t_5)) { - PyObject* exc_type = PyErr_Occurred(); - if (exc_type) { - if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); - else __PYX_ERR(1, 397, __pyx_L1_error) - } - break; - } - __Pyx_GOTREF(__pyx_t_5); - } - __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); - __pyx_t_5 = 0; - __pyx_v_dim = __pyx_t_1; - __pyx_t_1 = (__pyx_t_1 + 1); - - /* "View.MemoryView":398 - * - * for dim, idx in enumerate(index): - * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< - * - * return itemp - */ - __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 398, __pyx_L1_error) - __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 398, __pyx_L1_error) - __pyx_v_itemp = __pyx_t_7; - - /* "View.MemoryView":397 - * cdef char *itemp = self.view.buf - * - * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< - * itemp = pybuffer_index(&self.view, itemp, idx, dim) - * - */ - } - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "View.MemoryView":400 - * itemp = pybuffer_index(&self.view, itemp, idx, dim) - * - * return itemp # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = __pyx_v_itemp; - goto __pyx_L0; - - /* "View.MemoryView":393 - * PyThread_free_lock(self.lock) - * - * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< - * cdef Py_ssize_t dim - * cdef char *itemp = self.view.buf - */ - - /* function exit code */ - 
__pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_idx); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":403 - * - * - * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< - * if index is Ellipsis: - * return self - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ -static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { - PyObject *__pyx_v_have_slices = NULL; - PyObject *__pyx_v_indices = NULL; - char *__pyx_v_itemp; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - char *__pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__getitem__", 0); - - /* "View.MemoryView":404 - * - * def __getitem__(memoryview self, object index): - * if index is Ellipsis: # <<<<<<<<<<<<<< - * return self - * - */ - __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":405 - * def __getitem__(memoryview self, object index): - * if index is Ellipsis: - * return self # <<<<<<<<<<<<<< - * - * have_slices, indices = _unellipsify(index, self.view.ndim) - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_self)); - __pyx_r = ((PyObject *)__pyx_v_self); - goto __pyx_L0; - - /* "View.MemoryView":404 - * - * def __getitem__(memoryview self, object index): - * if index is Ellipsis: # <<<<<<<<<<<<<< - * return self - * - */ - } - - /* "View.MemoryView":407 - * return self - * - * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< - * - * cdef char *itemp - */ - __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 407, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (likely(__pyx_t_3 != Py_None)) { - PyObject* sequence = __pyx_t_3; - Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); - if (unlikely(size != 2)) { - if (size > 2) __Pyx_RaiseTooManyValuesError(2); - else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - __PYX_ERR(1, 407, __pyx_L1_error) - } - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(__pyx_t_5); - #else - __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 407, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 407, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - #endif - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } else { - 
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 407, __pyx_L1_error) - } - __pyx_v_have_slices = __pyx_t_4; - __pyx_t_4 = 0; - __pyx_v_indices = __pyx_t_5; - __pyx_t_5 = 0; - - /* "View.MemoryView":410 - * - * cdef char *itemp - * if have_slices: # <<<<<<<<<<<<<< - * return memview_slice(self, indices) - * else: - */ - __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 410, __pyx_L1_error) - if (__pyx_t_2) { - - /* "View.MemoryView":411 - * cdef char *itemp - * if have_slices: - * return memview_slice(self, indices) # <<<<<<<<<<<<<< - * else: - * itemp = self.get_item_pointer(indices) - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - - /* "View.MemoryView":410 - * - * cdef char *itemp - * if have_slices: # <<<<<<<<<<<<<< - * return memview_slice(self, indices) - * else: - */ - } - - /* "View.MemoryView":413 - * return memview_slice(self, indices) - * else: - * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< - * return self.convert_item_to_object(itemp) - * - */ - /*else*/ { - __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 413, __pyx_L1_error) - __pyx_v_itemp = __pyx_t_6; - - /* "View.MemoryView":414 - * else: - * itemp = self.get_item_pointer(indices) - * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< - * - * def __setitem__(memoryview self, object index, object value): - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 414, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_r = __pyx_t_3; - __pyx_t_3 = 0; - goto __pyx_L0; - } - - /* "View.MemoryView":403 - * - * - * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< - * if index is Ellipsis: - * return self - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_have_slices); - __Pyx_XDECREF(__pyx_v_indices); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":416 - * return self.convert_item_to_object(itemp) - * - * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< - * if self.view.readonly: - * raise TypeError("Cannot assign to read-only memoryview") - */ - -/* Python wrapper */ -static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ -static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { - int __pyx_r; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static int 
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { - PyObject *__pyx_v_have_slices = NULL; - PyObject *__pyx_v_obj = NULL; - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setitem__", 0); - __Pyx_INCREF(__pyx_v_index); - - /* "View.MemoryView":417 - * - * def __setitem__(memoryview self, object index, object value): - * if self.view.readonly: # <<<<<<<<<<<<<< - * raise TypeError("Cannot assign to read-only memoryview") - * - */ - __pyx_t_1 = (__pyx_v_self->view.readonly != 0); - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":418 - * def __setitem__(memoryview self, object index, object value): - * if self.view.readonly: - * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< - * - * have_slices, index = _unellipsify(index, self.view.ndim) - */ - __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 418, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_Raise(__pyx_t_2, 0, 0, 0); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __PYX_ERR(1, 418, __pyx_L1_error) - - /* "View.MemoryView":417 - * - * def __setitem__(memoryview self, object index, object value): - * if self.view.readonly: # <<<<<<<<<<<<<< - * raise TypeError("Cannot assign to read-only memoryview") - * - */ - } - - /* "View.MemoryView":420 - * raise TypeError("Cannot assign to read-only memoryview") - * - * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< - * - * if have_slices: - */ - __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 420, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - if (likely(__pyx_t_2 != Py_None)) { - PyObject* sequence = __pyx_t_2; - Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); - if (unlikely(size != 2)) { - if (size > 2) __Pyx_RaiseTooManyValuesError(2); - else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); - __PYX_ERR(1, 420, __pyx_L1_error) - } - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); - __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); - __Pyx_INCREF(__pyx_t_3); - __Pyx_INCREF(__pyx_t_4); - #else - __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 420, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 420, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - #endif - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } else { - __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 420, __pyx_L1_error) - } - __pyx_v_have_slices = __pyx_t_3; - __pyx_t_3 = 0; - __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4); - __pyx_t_4 = 0; - - /* "View.MemoryView":422 - * have_slices, index = _unellipsify(index, self.view.ndim) - * - * if have_slices: # <<<<<<<<<<<<<< - * obj = self.is_slice(value) - * if obj: - */ - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 422, __pyx_L1_error) - if (__pyx_t_1) { - - /* "View.MemoryView":423 - * - * if have_slices: - * obj = self.is_slice(value) # <<<<<<<<<<<<<< - * if obj: - * self.setitem_slice_assignment(self[index], obj) - */ - __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview 
*)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 423, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_v_obj = __pyx_t_2; - __pyx_t_2 = 0; - - /* "View.MemoryView":424 - * if have_slices: - * obj = self.is_slice(value) - * if obj: # <<<<<<<<<<<<<< - * self.setitem_slice_assignment(self[index], obj) - * else: - */ - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 424, __pyx_L1_error) - if (__pyx_t_1) { - - /* "View.MemoryView":425 - * obj = self.is_slice(value) - * if obj: - * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< - * else: - * self.setitem_slice_assign_scalar(self[index], value) - */ - __pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 425, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 425, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - - /* "View.MemoryView":424 - * if have_slices: - * obj = self.is_slice(value) - * if obj: # <<<<<<<<<<<<<< - * self.setitem_slice_assignment(self[index], obj) - * else: - */ - goto __pyx_L5; - } - - /* "View.MemoryView":427 - * self.setitem_slice_assignment(self[index], obj) - * else: - * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< - * else: - * self.setitem_indexed(index, value) - */ - /*else*/ { - __pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 427, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(1, 427, __pyx_L1_error) - __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 427, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_L5:; - - /* "View.MemoryView":422 - * have_slices, index = _unellipsify(index, self.view.ndim) - * - * if have_slices: # <<<<<<<<<<<<<< - * obj = self.is_slice(value) - * if obj: - */ - goto __pyx_L4; - } - - /* "View.MemoryView":429 - * self.setitem_slice_assign_scalar(self[index], value) - * else: - * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< - * - * cdef is_slice(self, obj): - */ - /*else*/ { - __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 429, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - } - __pyx_L4:; - - /* "View.MemoryView":416 - * return self.convert_item_to_object(itemp) - * - * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< - * if self.view.readonly: - * raise TypeError("Cannot assign to read-only memoryview") - */ - - /* function exit code */ - __pyx_r = 0; - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = -1; - __pyx_L0:; 
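/* Recap of the __setitem__ paths above, mirroring the Cython source: a
   read-only view raises TypeError up front; _unellipsify then normalises
   the index, and the assignment dispatches three ways: memoryview-to-slice
   copy via setitem_slice_assignment, scalar broadcast over a slice via
   setitem_slice_assign_scalar, and plain single-element assignment via
   setitem_indexed. */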
- __Pyx_XDECREF(__pyx_v_have_slices); - __Pyx_XDECREF(__pyx_v_obj); - __Pyx_XDECREF(__pyx_v_index); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":431 - * self.setitem_indexed(index, value) - * - * cdef is_slice(self, obj): # <<<<<<<<<<<<<< - * if not isinstance(obj, memoryview): - * try: - */ - -static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - PyObject *__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - int __pyx_t_9; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("is_slice", 0); - __Pyx_INCREF(__pyx_v_obj); - - /* "View.MemoryView":432 - * - * cdef is_slice(self, obj): - * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< - * try: - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - */ - __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); - __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":433 - * cdef is_slice(self, obj): - * if not isinstance(obj, memoryview): - * try: # <<<<<<<<<<<<<< - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - * self.dtype_is_object) - */ - { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); - __Pyx_XGOTREF(__pyx_t_3); - __Pyx_XGOTREF(__pyx_t_4); - __Pyx_XGOTREF(__pyx_t_5); - /*try:*/ { - - /* "View.MemoryView":434 - * if not isinstance(obj, memoryview): - * try: - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< - * self.dtype_is_object) - * except TypeError: - */ - __pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 434, __pyx_L4_error) - __Pyx_GOTREF(__pyx_t_6); - - /* "View.MemoryView":435 - * try: - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - * self.dtype_is_object) # <<<<<<<<<<<<<< - * except TypeError: - * return None - */ - __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 435, __pyx_L4_error) - __Pyx_GOTREF(__pyx_t_7); - - /* "View.MemoryView":434 - * if not isinstance(obj, memoryview): - * try: - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< - * self.dtype_is_object) - * except TypeError: - */ - __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 434, __pyx_L4_error) - __Pyx_GOTREF(__pyx_t_8); - __Pyx_INCREF(__pyx_v_obj); - __Pyx_GIVEREF(__pyx_v_obj); - PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); - __Pyx_GIVEREF(__pyx_t_6); - PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); - __Pyx_GIVEREF(__pyx_t_7); - PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); - __pyx_t_6 = 0; - __pyx_t_7 = 0; - __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 434, __pyx_L4_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); - __pyx_t_7 = 0; - - /* "View.MemoryView":433 - * cdef is_slice(self, obj): - * if not isinstance(obj, memoryview): - * try: # <<<<<<<<<<<<<< - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | 
PyBUF_ANY_CONTIGUOUS, - * self.dtype_is_object) - */ - } - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - goto __pyx_L9_try_end; - __pyx_L4_error:; - __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; - - /* "View.MemoryView":436 - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - * self.dtype_is_object) - * except TypeError: # <<<<<<<<<<<<<< - * return None - * - */ - __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); - if (__pyx_t_9) { - __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); - if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 436, __pyx_L6_except_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_GOTREF(__pyx_t_8); - __Pyx_GOTREF(__pyx_t_6); - - /* "View.MemoryView":437 - * self.dtype_is_object) - * except TypeError: - * return None # <<<<<<<<<<<<<< - * - * return obj - */ - __Pyx_XDECREF(__pyx_r); - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; - goto __pyx_L7_except_return; - } - goto __pyx_L6_except_error; - __pyx_L6_except_error:; - - /* "View.MemoryView":433 - * cdef is_slice(self, obj): - * if not isinstance(obj, memoryview): - * try: # <<<<<<<<<<<<<< - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - * self.dtype_is_object) - */ - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_XGIVEREF(__pyx_t_4); - __Pyx_XGIVEREF(__pyx_t_5); - __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); - goto __pyx_L1_error; - __pyx_L7_except_return:; - __Pyx_XGIVEREF(__pyx_t_3); - __Pyx_XGIVEREF(__pyx_t_4); - __Pyx_XGIVEREF(__pyx_t_5); - __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); - goto __pyx_L0; - __pyx_L9_try_end:; - } - - /* "View.MemoryView":432 - * - * cdef is_slice(self, obj): - * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< - * try: - * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, - */ - } - - /* "View.MemoryView":439 - * return None - * - * return obj # <<<<<<<<<<<<<< - * - * cdef setitem_slice_assignment(self, dst, src): - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_obj); - __pyx_r = __pyx_v_obj; - goto __pyx_L0; - - /* "View.MemoryView":431 - * self.setitem_indexed(index, value) - * - * cdef is_slice(self, obj): # <<<<<<<<<<<<<< - * if not isinstance(obj, memoryview): - * try: - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_obj); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":441 - * return obj - * - * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice dst_slice - * cdef __Pyx_memviewslice src_slice - */ - -static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { - __Pyx_memviewslice __pyx_v_dst_slice; - __Pyx_memviewslice __pyx_v_src_slice; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_memviewslice *__pyx_t_1; - __Pyx_memviewslice *__pyx_t_2; - PyObject *__pyx_t_3 
= NULL; - int __pyx_t_4; - int __pyx_t_5; - int __pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); - - /* "View.MemoryView":445 - * cdef __Pyx_memviewslice src_slice - * - * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< - * get_slice_from_memview(dst, &dst_slice)[0], - * src.ndim, dst.ndim, self.dtype_is_object) - */ - if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 445, __pyx_L1_error) - __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 445, __pyx_L1_error) - - /* "View.MemoryView":446 - * - * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], - * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< - * src.ndim, dst.ndim, self.dtype_is_object) - * - */ - if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 446, __pyx_L1_error) - __pyx_t_2 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 446, __pyx_L1_error) - - /* "View.MemoryView":447 - * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], - * get_slice_from_memview(dst, &dst_slice)[0], - * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< - * - * cdef setitem_slice_assign_scalar(self, memoryview dst, value): - */ - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 447, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 447, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "View.MemoryView":445 - * cdef __Pyx_memviewslice src_slice - * - * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< - * get_slice_from_memview(dst, &dst_slice)[0], - * src.ndim, dst.ndim, self.dtype_is_object) - */ - __pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 445, __pyx_L1_error) - - /* "View.MemoryView":441 - * return obj - * - * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice dst_slice - * cdef __Pyx_memviewslice src_slice - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":449 - * src.ndim, dst.ndim, self.dtype_is_object) - * - * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # 
<<<<<<<<<<<<<< - * cdef int array[128] - * cdef void *tmp = NULL - */ - -static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { - int __pyx_v_array[0x80]; - void *__pyx_v_tmp; - void *__pyx_v_item; - __Pyx_memviewslice *__pyx_v_dst_slice; - __Pyx_memviewslice __pyx_v_tmp_slice; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_memviewslice *__pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - int __pyx_t_4; - int __pyx_t_5; - char const *__pyx_t_6; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - PyObject *__pyx_t_9 = NULL; - PyObject *__pyx_t_10 = NULL; - PyObject *__pyx_t_11 = NULL; - PyObject *__pyx_t_12 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); - - /* "View.MemoryView":451 - * cdef setitem_slice_assign_scalar(self, memoryview dst, value): - * cdef int array[128] - * cdef void *tmp = NULL # <<<<<<<<<<<<<< - * cdef void *item - * - */ - __pyx_v_tmp = NULL; - - /* "View.MemoryView":456 - * cdef __Pyx_memviewslice *dst_slice - * cdef __Pyx_memviewslice tmp_slice - * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< - * - * if <size_t>self.view.itemsize > sizeof(array): - */ - __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 456, __pyx_L1_error) - __pyx_v_dst_slice = __pyx_t_1; - - /* "View.MemoryView":458 - * dst_slice = get_slice_from_memview(dst, &tmp_slice) - * - * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< - * tmp = PyMem_Malloc(self.view.itemsize) - * if tmp == NULL: - */ - __pyx_t_2 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":459 - * - * if <size_t>self.view.itemsize > sizeof(array): - * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< - * if tmp == NULL: - * raise MemoryError - */ - __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); - - /* "View.MemoryView":460 - * if <size_t>self.view.itemsize > sizeof(array): - * tmp = PyMem_Malloc(self.view.itemsize) - * if tmp == NULL: # <<<<<<<<<<<<<< - * raise MemoryError - * item = tmp - */ - __pyx_t_2 = ((__pyx_v_tmp == NULL) != 0); - if (unlikely(__pyx_t_2)) { - - /* "View.MemoryView":461 - * tmp = PyMem_Malloc(self.view.itemsize) - * if tmp == NULL: - * raise MemoryError # <<<<<<<<<<<<<< - * item = tmp - * else: - */ - PyErr_NoMemory(); __PYX_ERR(1, 461, __pyx_L1_error) - - /* "View.MemoryView":460 - * if <size_t>self.view.itemsize > sizeof(array): - * tmp = PyMem_Malloc(self.view.itemsize) - * if tmp == NULL: # <<<<<<<<<<<<<< - * raise MemoryError - * item = tmp - */ - } - - /* "View.MemoryView":462 - * if tmp == NULL: - * raise MemoryError - * item = tmp # <<<<<<<<<<<<<< - * else: - * item = <void *> array - */ - __pyx_v_item = __pyx_v_tmp; - - /* "View.MemoryView":458 - * dst_slice = get_slice_from_memview(dst, &tmp_slice) - * - * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< - * tmp = PyMem_Malloc(self.view.itemsize) - * if tmp == NULL: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":464 - * item = tmp - * else: - * item = <void *> array # <<<<<<<<<<<<<< - * - * try: - */ - /*else*/ { - __pyx_v_item = ((void *)__pyx_v_array); - } - __pyx_L3:; - - /* "View.MemoryView":466 - * item = <void *> array - * - * try: # <<<<<<<<<<<<<< - * if self.dtype_is_object: - * (<PyObject **> item)[0] = value - */ - 
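/* The staging logic above is the usual small-buffer optimisation: a scalar
   whose itemsize fits in the 128-int scratch array lives on the stack, and
   only larger items fall back to PyMem_Malloc (released by the finally
   branch further down). A compact sketch of the pattern, with illustrative
   names (scratch, itemsize) rather than the generated ones:

       int scratch[128];
       void *tmp = NULL, *item;
       if ((size_t)itemsize > sizeof(scratch)) {
           tmp = PyMem_Malloc(itemsize);      // heap fallback for big items
           if (tmp == NULL)
               return NULL;                   // MemoryError in the original
           item = tmp;
       } else {
           item = scratch;                    // stack scratch space
       }
*/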
/*try:*/ { - - /* "View.MemoryView":467 - * - * try: - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * (<PyObject **> item)[0] = value - * else: - */ - __pyx_t_2 = (__pyx_v_self->dtype_is_object != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":468 - * try: - * if self.dtype_is_object: - * (<PyObject **> item)[0] = value # <<<<<<<<<<<<<< - * else: - * self.assign_item_from_object(<char *> item, value) - */ - (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); - - /* "View.MemoryView":467 - * - * try: - * if self.dtype_is_object: # <<<<<<<<<<<<<< - * (<PyObject **> item)[0] = value - * else: - */ - goto __pyx_L8; - } - - /* "View.MemoryView":470 - * (<PyObject **> item)[0] = value - * else: - * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< - * - * - */ - /*else*/ { - __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 470, __pyx_L6_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_L8:; - - /* "View.MemoryView":474 - * - * - * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< - * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) - * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, - */ - __pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":475 - * - * if self.view.suboffsets != NULL: - * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< - * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, - * item, self.dtype_is_object) - */ - __pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 475, __pyx_L6_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "View.MemoryView":474 - * - * - * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< - * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) - * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, - */ - } - - /* "View.MemoryView":476 - * if self.view.suboffsets != NULL: - * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) - * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< - * item, self.dtype_is_object) - * finally: - */ - __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); - } - - /* "View.MemoryView":479 - * item, self.dtype_is_object) - * finally: - * PyMem_Free(tmp) # <<<<<<<<<<<<<< - * - * cdef setitem_indexed(self, index, value): - */ - /*finally:*/ { - /*normal exit:*/{ - PyMem_Free(__pyx_v_tmp); - goto __pyx_L7; - } - __pyx_L6_error:; - /*exception exit:*/{ - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; - __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; - if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); - if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9); - __Pyx_XGOTREF(__pyx_t_7); - __Pyx_XGOTREF(__pyx_t_8); - __Pyx_XGOTREF(__pyx_t_9); - __Pyx_XGOTREF(__pyx_t_10); - __Pyx_XGOTREF(__pyx_t_11); - __Pyx_XGOTREF(__pyx_t_12); - __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename; - { - PyMem_Free(__pyx_v_tmp); - } - if 
(PY_MAJOR_VERSION >= 3) { ... }   /* tail of setitem_slice_assign_scalar exception handling */
- /* [Cython-generated C elided below; the "View.MemoryView" .pyx source it implemented:] */
- /* "View.MemoryView":449
-  * cdef setitem_slice_assign_scalar(self, memoryview dst, value):
-  *     cdef int array[128]
-  *     cdef void *tmp = NULL
-  */
- /* "View.MemoryView":481
-  * cdef setitem_indexed(self, index, value):
-  *     cdef char *itemp = self.get_item_pointer(index)
-  *     self.assign_item_from_object(itemp, value)
-  */
- /* "View.MemoryView":485
-  * cdef convert_item_to_object(self, char *itemp):
-  *     """Only used if instantiated manually by the user, or if Cython doesn't
-  *     know how to convert the type"""
-  *     import struct
-  *     cdef bytes bytesitem
-  *
-  *     bytesitem = itemp[:self.view.itemsize]
-  *     try:
-  *         result = struct.unpack(self.view.format, bytesitem)
-  *     except struct.error:
-  *         raise ValueError("Unable to convert item to object")
-  *     else:
-  *         if len(self.view.format) == 1:
-  *             return result[0]
-  *         return result
-  */
- /* "View.MemoryView":501
-  * cdef assign_item_from_object(self, char *itemp, object value):
-  *     """Only used if instantiated manually by the user, or if Cython doesn't
-  *     know how to convert the type"""
-  *     import struct
-  *     cdef char c
-  *     cdef bytes bytesvalue
-  *     cdef Py_ssize_t i
-  *
-  *     if isinstance(value, tuple):
-  *         bytesvalue = struct.pack(self.view.format, *value)
-  *     else:
-  *         bytesvalue = struct.pack(self.view.format, value)
-  *
-  *     for i, c in enumerate(bytesvalue):
-  *         itemp[i] = c
-  */
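The two converters above are the slow path for element access: when Cython cannot convert an item natively, it round-trips the raw bytes through the struct module with self.view.format as the format string, unwrapping single-item tuples on the way out. A minimal pure-Python sketch of that round-trip, assuming a hypothetical single-item "f" (float32) format:

    import struct

    fmt = "f"                          # assumed example format: one float32 item
    itemsize = struct.calcsize(fmt)    # 4 bytes

    # assign_item_from_object: Python object -> raw bytes stored at itemp
    raw = struct.pack(fmt, 3.14)
    assert len(raw) == itemsize

    # convert_item_to_object: raw bytes -> Python object; unpack always
    # returns a tuple, unwrapped when the format holds a single item
    result = struct.unpack(fmt, raw)
    value = result[0] if len(fmt) == 1 else result
    print(value)                       # ~3.1400001 (float32 rounding)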
- /* "View.MemoryView":518
-  * @cname('getbuffer')
-  * def __getbuffer__(self, Py_buffer *info, int flags):
-  *     if flags & PyBUF_WRITABLE and self.view.readonly:
-  *         raise ValueError("Cannot create writable memory view from read-only memoryview")
-  *
-  *     if flags & PyBUF_ND:
-  *         info.shape = self.view.shape
-  *     else:
-  *         info.shape = NULL
-  *
-  *     if flags & PyBUF_STRIDES:
-  *         info.strides = self.view.strides
-  *     else:
-  *         info.strides = NULL
-  *
-  *     if flags & PyBUF_INDIRECT:
-  *         info.suboffsets = self.view.suboffsets
-  *     else:
-  *         info.suboffsets = NULL
-  *
-  *     if flags & PyBUF_FORMAT:
-  *         info.format = self.view.format
-  *     else:
-  *         info.format = NULL
-  *
-  *     info.buf = self.view.buf
-  *     info.ndim = self.view.ndim
-  *     info.itemsize = self.view.itemsize
-  *     info.len = self.view.len
-  *     info.readonly = self.view.readonly
-  *     info.obj = self
-  *
-  * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
-  */
- /* "View.MemoryView":553
-  * @property
-  * def T(self):
-  *     cdef _memoryviewslice result = memoryview_copy(self)
-  *     transpose_memslice(&result.from_slice)
-  *     return result
-  */
- /* "View.MemoryView":559
-  * @property
-  * def base(self):
-  *     return self.obj
-  */
- /* "View.MemoryView":563
-  * @property
-  * def shape(self):
-  *     return tuple([length for length in self.view.shape[:self.view.ndim]])
-  */
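The T property above never moves data: it copies the slice struct and transpose_memslice reverses its shape and strides in place. A pure-Python sketch of why reversing strides is enough (index_to_offset is a hypothetical helper for this sketch only):

    def index_to_offset(idx, strides):
        # dot product of an index tuple with the strides, in elements
        return sum(i * s for i, s in zip(idx, strides))

    buf = list(range(6))                  # flat storage for a 2x3 view
    shape, strides = (2, 3), (3, 1)       # row-major, strides in elements
    t_shape, t_strides = shape[::-1], strides[::-1]   # the "transpose"

    print(buf[index_to_offset((0, 1), strides)])      # 1: view[0, 1]
    print(buf[index_to_offset((1, 0), t_strides)])    # 1: view.T[1, 0]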
- /* "View.MemoryView":567
-  * @property
-  * def strides(self):
-  *     if self.view.strides == NULL:
-  *         raise ValueError("Buffer view does not expose strides")
-  *     return tuple([stride for stride in self.view.strides[:self.view.ndim]])
-  */
- /* "View.MemoryView":575
-  * @property
-  * def suboffsets(self):
-  *     if self.view.suboffsets == NULL:
-  *         return (-1,) * self.view.ndim
-  *     return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
-  */
- /* "View.MemoryView":582
-  * @property
-  * def ndim(self):
-  *     return self.view.ndim
-  */
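These properties are thin reads of the Py_buffer metadata, widened into Python tuples. The builtin memoryview exposes the same fields; note that where CPython reports an empty suboffsets tuple, the class above substitutes (-1,) * ndim for buffers carrying no suboffset information. A quick sketch:

    m = memoryview(bytes(range(6))).cast("B", (2, 3))  # 2-D view of 6 bytes
    print(m.ndim, m.shape, m.strides, m.itemsize)      # 2 (2, 3) (3, 1) 1
    print(m.suboffsets)   # () for builtin memoryview; (-1, -1) in the class above
    print(m[1, 2])        # 5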
- /* "View.MemoryView":586
-  * @property
-  * def itemsize(self):
-  *     return self.view.itemsize
-  */
- /* "View.MemoryView":590
-  * @property
-  * def nbytes(self):
-  *     return self.size * self.view.itemsize
-  */
- /* "View.MemoryView":594
-  * @property
-  * def size(self):
-  *     if self._size is None:
-  *         result = 1
-  *         for length in self.view.shape[:self.view.ndim]:
-  *             result *= length
-  *         self._size = result
-  *     return self._size
-  */
- /* "View.MemoryView":605
-  * def __len__(self):
-  *     if self.view.ndim >= 1:
-  *         return self.view.shape[0]
-  *     return 0
-  */
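size is the product of the shape, computed lazily on first access and memoized in _size; nbytes then scales it by itemsize, and __len__ reports only the leading dimension. A small standalone sketch of that caching pattern (SizedView is a hypothetical stand-in, not the real class):

    import math

    class SizedView:
        def __init__(self, shape, itemsize):
            self.shape, self.itemsize = shape, itemsize
            self._size = None              # cache, filled on first access

        @property
        def size(self):
            if self._size is None:
                self._size = math.prod(self.shape)
            return self._size

        @property
        def nbytes(self):
            return self.size * self.itemsize

        def __len__(self):
            return self.shape[0] if self.shape else 0

    v = SizedView((2, 3, 4), itemsize=4)
    print(v.size, v.nbytes, len(v))        # 24 96 2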
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__repr__", 0); - - /* "View.MemoryView":612 - * - * def __repr__(self): - * return "" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< - * id(self)) - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 612, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - - /* "View.MemoryView":613 - * def __repr__(self): - * return "" % (self.base.__class__.__name__, - * id(self)) # <<<<<<<<<<<<<< - * - * def __str__(self): - */ - __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 613, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - - /* "View.MemoryView":612 - * - * def __repr__(self): - * return "" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< - * id(self)) - * - */ - __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 612, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); - __pyx_t_1 = 0; - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 612, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":611 - * return 0 - * - * def __repr__(self): # <<<<<<<<<<<<<< - * return "" % (self.base.__class__.__name__, - * id(self)) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":615 - * id(self)) - * - * def __str__(self): # <<<<<<<<<<<<<< - * return "" % (self.base.__class__.__name__,) - * - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ 
- __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__str__", 0); - - /* "View.MemoryView":616 - * - * def __str__(self): - * return "" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 616, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); - __pyx_t_1 = 0; - __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 616, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_r = __pyx_t_1; - __pyx_t_1 = 0; - goto __pyx_L0; - - /* "View.MemoryView":615 - * id(self)) - * - * def __str__(self): # <<<<<<<<<<<<<< - * return "" % (self.base.__class__.__name__,) - * - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":619 - * - * - * def is_c_contig(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice *mslice - * cdef __Pyx_memviewslice tmp - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { - __Pyx_memviewslice *__pyx_v_mslice; - __Pyx_memviewslice __pyx_v_tmp; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_memviewslice *__pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("is_c_contig", 0); - - /* "View.MemoryView":622 - * cdef __Pyx_memviewslice *mslice - * cdef __Pyx_memviewslice tmp - * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< - * return slice_is_contig(mslice[0], 'C', self.view.ndim) - * - */ - __pyx_t_1 = 
- /* "View.MemoryView":619
-  * def is_c_contig(self):
-  *     cdef __Pyx_memviewslice *mslice
-  *     cdef __Pyx_memviewslice tmp
-  *     mslice = get_slice_from_memview(self, &tmp)
-  *     return slice_is_contig(mslice[0], 'C', self.view.ndim)
-  */
- /* "View.MemoryView":625
-  * def is_f_contig(self):
-  *     cdef __Pyx_memviewslice *mslice
-  *     cdef __Pyx_memviewslice tmp
-  *     mslice = get_slice_from_memview(self, &tmp)
-  *     return slice_is_contig(mslice[0], 'F', self.view.ndim)
-  */
- /* "View.MemoryView":631
-  * def copy(self):
-  *     cdef __Pyx_memviewslice mslice
-  *     cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
-  *
-  *     slice_copy(self, &mslice)
-  *     mslice = slice_copy_contig(&mslice, "c", self.view.ndim,
-  *                                self.view.itemsize,
-  *                                flags|PyBUF_C_CONTIGUOUS,
-  *                                self.dtype_is_object)
-  *
-  *     return memoryview_copy_from_slice(self, &mslice)
-  */
memoryview_copy_from_slice(self, &mslice) - * - * def copy_fortran(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice src, dst - * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS - */ - -/* Python wrapper */ -static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); - __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { - __Pyx_memviewslice __pyx_v_src; - __Pyx_memviewslice __pyx_v_dst; - int __pyx_v_flags; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_memviewslice __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("copy_fortran", 0); - - /* "View.MemoryView":645 - * def copy_fortran(self): - * cdef __Pyx_memviewslice src, dst - * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< - * - * slice_copy(self, &src) - */ - __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); - - /* "View.MemoryView":647 - * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS - * - * slice_copy(self, &src) # <<<<<<<<<<<<<< - * dst = slice_copy_contig(&src, "fortran", self.view.ndim, - * self.view.itemsize, - */ - __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); - - /* "View.MemoryView":648 - * - * slice_copy(self, &src) - * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< - * self.view.itemsize, - * flags|PyBUF_F_CONTIGUOUS, - */ - __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 648, __pyx_L1_error) - __pyx_v_dst = __pyx_t_1; - - /* "View.MemoryView":653 - * self.dtype_is_object) - * - * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< - * - * - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 653, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":643 - * return memoryview_copy_from_slice(self, &mslice) - * - * def copy_fortran(self): # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice src, dst - * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject 
*unused); /*proto*/ -static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); - __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__reduce_cython__", 0); - - /* "(tree fragment)":2 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(1, 2, __pyx_L1_error) - - /* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ -static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); - __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setstate_cython__", 0); - - /* "(tree fragment)":4 - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) - 
__Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(1, 4, __pyx_L1_error) - - /* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":657 - * - * @cname('__pyx_memoryview_new') - * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< - * cdef memoryview result = memoryview(o, flags, dtype_is_object) - * result.typeinfo = typeinfo - */ - -static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { - struct __pyx_memoryview_obj *__pyx_v_result = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); - - /* "View.MemoryView":658 - * @cname('__pyx_memoryview_new') - * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): - * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< - * result.typeinfo = typeinfo - * return result - */ - __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 658, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 658, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(__pyx_v_o); - __Pyx_GIVEREF(__pyx_v_o); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); - __Pyx_GIVEREF(__pyx_t_1); - PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_2); - PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); - __pyx_t_1 = 0; - __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); - __pyx_t_2 = 0; - - /* "View.MemoryView":659 - * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): - * cdef memoryview result = memoryview(o, flags, dtype_is_object) - * result.typeinfo = typeinfo # <<<<<<<<<<<<<< - * return result - * - */ - __pyx_v_result->typeinfo = __pyx_v_typeinfo; - - /* "View.MemoryView":660 - * cdef memoryview result = memoryview(o, flags, dtype_is_object) - * result.typeinfo = typeinfo - * return result # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_check') - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_result)); - __pyx_r = ((PyObject *)__pyx_v_result); - goto __pyx_L0; - - /* "View.MemoryView":657 - * - * @cname('__pyx_memoryview_new') - * cdef memoryview_cwrapper(object o, int flags, bint 
dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< - * cdef memoryview result = memoryview(o, flags, dtype_is_object) - * result.typeinfo = typeinfo - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_result); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":663 - * - * @cname('__pyx_memoryview_check') - * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< - * return isinstance(o, memoryview) - * - */ - -static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { - int __pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - __Pyx_RefNannySetupContext("memoryview_check", 0); - - /* "View.MemoryView":664 - * @cname('__pyx_memoryview_check') - * cdef inline bint memoryview_check(object o): - * return isinstance(o, memoryview) # <<<<<<<<<<<<<< - * - * cdef tuple _unellipsify(object index, int ndim): - */ - __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); - __pyx_r = __pyx_t_1; - goto __pyx_L0; - - /* "View.MemoryView":663 - * - * @cname('__pyx_memoryview_check') - * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< - * return isinstance(o, memoryview) - * - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":666 - * return isinstance(o, memoryview) - * - * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< - * """ - * Replace all ellipses with full slices and fill incomplete indices with - */ - -static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { - PyObject *__pyx_v_tup = NULL; - PyObject *__pyx_v_result = NULL; - int __pyx_v_have_slices; - int __pyx_v_seen_ellipsis; - CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; - PyObject *__pyx_v_item = NULL; - Py_ssize_t __pyx_v_nslices; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - Py_ssize_t __pyx_t_5; - PyObject *(*__pyx_t_6)(PyObject *); - PyObject *__pyx_t_7 = NULL; - Py_ssize_t __pyx_t_8; - int __pyx_t_9; - int __pyx_t_10; - PyObject *__pyx_t_11 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("_unellipsify", 0); - - /* "View.MemoryView":671 - * full slices. - * """ - * if not isinstance(index, tuple): # <<<<<<<<<<<<<< - * tup = (index,) - * else: - */ - __pyx_t_1 = PyTuple_Check(__pyx_v_index); - __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":672 - * """ - * if not isinstance(index, tuple): - * tup = (index,) # <<<<<<<<<<<<<< - * else: - * tup = index - */ - __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 672, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_INCREF(__pyx_v_index); - __Pyx_GIVEREF(__pyx_v_index); - PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); - __pyx_v_tup = __pyx_t_3; - __pyx_t_3 = 0; - - /* "View.MemoryView":671 - * full slices. 
- * """ - * if not isinstance(index, tuple): # <<<<<<<<<<<<<< - * tup = (index,) - * else: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":674 - * tup = (index,) - * else: - * tup = index # <<<<<<<<<<<<<< - * - * result = [] - */ - /*else*/ { - __Pyx_INCREF(__pyx_v_index); - __pyx_v_tup = __pyx_v_index; - } - __pyx_L3:; - - /* "View.MemoryView":676 - * tup = index - * - * result = [] # <<<<<<<<<<<<<< - * have_slices = False - * seen_ellipsis = False - */ - __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 676, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_v_result = ((PyObject*)__pyx_t_3); - __pyx_t_3 = 0; - - /* "View.MemoryView":677 - * - * result = [] - * have_slices = False # <<<<<<<<<<<<<< - * seen_ellipsis = False - * for idx, item in enumerate(tup): - */ - __pyx_v_have_slices = 0; - - /* "View.MemoryView":678 - * result = [] - * have_slices = False - * seen_ellipsis = False # <<<<<<<<<<<<<< - * for idx, item in enumerate(tup): - * if item is Ellipsis: - */ - __pyx_v_seen_ellipsis = 0; - - /* "View.MemoryView":679 - * have_slices = False - * seen_ellipsis = False - * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< - * if item is Ellipsis: - * if not seen_ellipsis: - */ - __Pyx_INCREF(__pyx_int_0); - __pyx_t_3 = __pyx_int_0; - if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { - __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; - __pyx_t_6 = NULL; - } else { - __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 679, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 679, __pyx_L1_error) - } - for (;;) { - if (likely(!__pyx_t_6)) { - if (likely(PyList_CheckExact(__pyx_t_4))) { - if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) - #else - __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - #endif - } else { - if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 679, __pyx_L1_error) - #else - __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - #endif - } - } else { - __pyx_t_7 = __pyx_t_6(__pyx_t_4); - if (unlikely(!__pyx_t_7)) { - PyObject* exc_type = PyErr_Occurred(); - if (exc_type) { - if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); - else __PYX_ERR(1, 679, __pyx_L1_error) - } - break; - } - __Pyx_GOTREF(__pyx_t_7); - } - __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); - __pyx_t_7 = 0; - __Pyx_INCREF(__pyx_t_3); - __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); - __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 679, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_3); - __pyx_t_3 = __pyx_t_7; - __pyx_t_7 = 0; - - /* "View.MemoryView":680 - * seen_ellipsis = False - * for idx, item in enumerate(tup): - * if item is Ellipsis: # <<<<<<<<<<<<<< - * if not seen_ellipsis: - * 
result.extend([slice(None)] * (ndim - len(tup) + 1)) - */ - __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); - __pyx_t_1 = (__pyx_t_2 != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":681 - * for idx, item in enumerate(tup): - * if item is Ellipsis: - * if not seen_ellipsis: # <<<<<<<<<<<<<< - * result.extend([slice(None)] * (ndim - len(tup) + 1)) - * seen_ellipsis = True - */ - __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":682 - * if item is Ellipsis: - * if not seen_ellipsis: - * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< - * seen_ellipsis = True - * else: - */ - __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 682, __pyx_L1_error) - __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 682, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - { Py_ssize_t __pyx_temp; - for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { - __Pyx_INCREF(__pyx_slice__16); - __Pyx_GIVEREF(__pyx_slice__16); - PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__16); - } - } - __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 682, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - - /* "View.MemoryView":683 - * if not seen_ellipsis: - * result.extend([slice(None)] * (ndim - len(tup) + 1)) - * seen_ellipsis = True # <<<<<<<<<<<<<< - * else: - * result.append(slice(None)) - */ - __pyx_v_seen_ellipsis = 1; - - /* "View.MemoryView":681 - * for idx, item in enumerate(tup): - * if item is Ellipsis: - * if not seen_ellipsis: # <<<<<<<<<<<<<< - * result.extend([slice(None)] * (ndim - len(tup) + 1)) - * seen_ellipsis = True - */ - goto __pyx_L7; - } - - /* "View.MemoryView":685 - * seen_ellipsis = True - * else: - * result.append(slice(None)) # <<<<<<<<<<<<<< - * have_slices = True - * else: - */ - /*else*/ { - __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__16); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 685, __pyx_L1_error) - } - __pyx_L7:; - - /* "View.MemoryView":686 - * else: - * result.append(slice(None)) - * have_slices = True # <<<<<<<<<<<<<< - * else: - * if not isinstance(item, slice) and not PyIndex_Check(item): - */ - __pyx_v_have_slices = 1; - - /* "View.MemoryView":680 - * seen_ellipsis = False - * for idx, item in enumerate(tup): - * if item is Ellipsis: # <<<<<<<<<<<<<< - * if not seen_ellipsis: - * result.extend([slice(None)] * (ndim - len(tup) + 1)) - */ - goto __pyx_L6; - } - - /* "View.MemoryView":688 - * have_slices = True - * else: - * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< - * raise TypeError("Cannot index with type '%s'" % type(item)) - * - */ - /*else*/ { - __pyx_t_2 = PySlice_Check(__pyx_v_item); - __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); - if (__pyx_t_10) { - } else { - __pyx_t_1 = __pyx_t_10; - goto __pyx_L9_bool_binop_done; - } - __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); - __pyx_t_1 = __pyx_t_10; - __pyx_L9_bool_binop_done:; - if (unlikely(__pyx_t_1)) { - - /* "View.MemoryView":689 - * else: - * if not isinstance(item, slice) and not PyIndex_Check(item): - * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< - * - * have_slices = have_slices or isinstance(item, slice) - */ - __pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject 
*)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 689, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 689, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_Raise(__pyx_t_11, 0, 0, 0); - __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; - __PYX_ERR(1, 689, __pyx_L1_error) - - /* "View.MemoryView":688 - * have_slices = True - * else: - * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< - * raise TypeError("Cannot index with type '%s'" % type(item)) - * - */ - } - - /* "View.MemoryView":691 - * raise TypeError("Cannot index with type '%s'" % type(item)) - * - * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< - * result.append(item) - * - */ - __pyx_t_10 = (__pyx_v_have_slices != 0); - if (!__pyx_t_10) { - } else { - __pyx_t_1 = __pyx_t_10; - goto __pyx_L11_bool_binop_done; - } - __pyx_t_10 = PySlice_Check(__pyx_v_item); - __pyx_t_2 = (__pyx_t_10 != 0); - __pyx_t_1 = __pyx_t_2; - __pyx_L11_bool_binop_done:; - __pyx_v_have_slices = __pyx_t_1; - - /* "View.MemoryView":692 - * - * have_slices = have_slices or isinstance(item, slice) - * result.append(item) # <<<<<<<<<<<<<< - * - * nslices = ndim - len(result) - */ - __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 692, __pyx_L1_error) - } - __pyx_L6:; - - /* "View.MemoryView":679 - * have_slices = False - * seen_ellipsis = False - * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< - * if item is Ellipsis: - * if not seen_ellipsis: - */ - } - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "View.MemoryView":694 - * result.append(item) - * - * nslices = ndim - len(result) # <<<<<<<<<<<<<< - * if nslices: - * result.extend([slice(None)] * nslices) - */ - __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 694, __pyx_L1_error) - __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); - - /* "View.MemoryView":695 - * - * nslices = ndim - len(result) - * if nslices: # <<<<<<<<<<<<<< - * result.extend([slice(None)] * nslices) - * - */ - __pyx_t_1 = (__pyx_v_nslices != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":696 - * nslices = ndim - len(result) - * if nslices: - * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< - * - * return have_slices or nslices, tuple(result) - */ - __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 
0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 696, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - { Py_ssize_t __pyx_temp; - for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { - __Pyx_INCREF(__pyx_slice__16); - __Pyx_GIVEREF(__pyx_slice__16); - PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__16); - } - } - __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 696, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "View.MemoryView":695 - * - * nslices = ndim - len(result) - * if nslices: # <<<<<<<<<<<<<< - * result.extend([slice(None)] * nslices) - * - */ - } - - /* "View.MemoryView":698 - * result.extend([slice(None)] * nslices) - * - * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< - * - * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): - */ - __Pyx_XDECREF(__pyx_r); - if (!__pyx_v_have_slices) { - } else { - __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __pyx_t_4; - __pyx_t_4 = 0; - goto __pyx_L14_bool_binop_done; - } - __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_3 = __pyx_t_4; - __pyx_t_4 = 0; - __pyx_L14_bool_binop_done:; - __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 698, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 698, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_11); - __Pyx_GIVEREF(__pyx_t_3); - PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3); - __Pyx_GIVEREF(__pyx_t_4); - PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4); - __pyx_t_3 = 0; - __pyx_t_4 = 0; - __pyx_r = ((PyObject*)__pyx_t_11); - __pyx_t_11 = 0; - goto __pyx_L0; - - /* "View.MemoryView":666 - * return isinstance(o, memoryview) - * - * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< - * """ - * Replace all ellipses with full slices and fill incomplete indices with - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_11); - __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v_tup); - __Pyx_XDECREF(__pyx_v_result); - __Pyx_XDECREF(__pyx_v_idx); - __Pyx_XDECREF(__pyx_v_item); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":700 - * return have_slices or nslices, tuple(result) - * - * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< - * for suboffset in suboffsets[:ndim]: - * if suboffset >= 0: - */ - -static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { - Py_ssize_t __pyx_v_suboffset; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - Py_ssize_t *__pyx_t_1; - Py_ssize_t *__pyx_t_2; - Py_ssize_t *__pyx_t_3; - int __pyx_t_4; - PyObject *__pyx_t_5 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); - - /* "View.MemoryView":701 - * - * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): - * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< - * if suboffset >= 0: - * raise ValueError("Indirect dimensions not supported") - */ - 
__pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); - for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { - __pyx_t_1 = __pyx_t_3; - __pyx_v_suboffset = (__pyx_t_1[0]); - - /* "View.MemoryView":702 - * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): - * for suboffset in suboffsets[:ndim]: - * if suboffset >= 0: # <<<<<<<<<<<<<< - * raise ValueError("Indirect dimensions not supported") - * - */ - __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); - if (unlikely(__pyx_t_4)) { - - /* "View.MemoryView":703 - * for suboffset in suboffsets[:ndim]: - * if suboffset >= 0: - * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 703, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_5); - __Pyx_Raise(__pyx_t_5, 0, 0, 0); - __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; - __PYX_ERR(1, 703, __pyx_L1_error) - - /* "View.MemoryView":702 - * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): - * for suboffset in suboffsets[:ndim]: - * if suboffset >= 0: # <<<<<<<<<<<<<< - * raise ValueError("Indirect dimensions not supported") - * - */ - } - } - - /* "View.MemoryView":700 - * return have_slices or nslices, tuple(result) - * - * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< - * for suboffset in suboffsets[:ndim]: - * if suboffset >= 0: - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":710 - * - * @cname('__pyx_memview_slice') - * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< - * cdef int new_ndim = 0, suboffset_dim = -1, dim - * cdef bint negative_step - */ - -static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { - int __pyx_v_new_ndim; - int __pyx_v_suboffset_dim; - int __pyx_v_dim; - __Pyx_memviewslice __pyx_v_src; - __Pyx_memviewslice __pyx_v_dst; - __Pyx_memviewslice *__pyx_v_p_src; - struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; - __Pyx_memviewslice *__pyx_v_p_dst; - int *__pyx_v_p_suboffset_dim; - Py_ssize_t __pyx_v_start; - Py_ssize_t __pyx_v_stop; - Py_ssize_t __pyx_v_step; - int __pyx_v_have_start; - int __pyx_v_have_stop; - int __pyx_v_have_step; - PyObject *__pyx_v_index = NULL; - struct __pyx_memoryview_obj *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - struct __pyx_memoryview_obj *__pyx_t_4; - char *__pyx_t_5; - int __pyx_t_6; - Py_ssize_t __pyx_t_7; - PyObject *(*__pyx_t_8)(PyObject *); - PyObject *__pyx_t_9 = NULL; - Py_ssize_t __pyx_t_10; - int __pyx_t_11; - Py_ssize_t __pyx_t_12; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("memview_slice", 0); - - /* "View.MemoryView":711 - * @cname('__pyx_memview_slice') - * cdef memoryview memview_slice(memoryview memview, object indices): - * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< - * cdef bint negative_step - * cdef __Pyx_memviewslice src, dst - */ - __pyx_v_new_ndim = 0; - __pyx_v_suboffset_dim = -1; - - /* 
"View.MemoryView":718 - * - * - * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< - * - * cdef _memoryviewslice memviewsliceobj - */ - (void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)))); - - /* "View.MemoryView":722 - * cdef _memoryviewslice memviewsliceobj - * - * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< - * - * if isinstance(memview, _memoryviewslice): - */ - #ifndef CYTHON_WITHOUT_ASSERTIONS - if (unlikely(!Py_OptimizeFlag)) { - if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { - PyErr_SetNone(PyExc_AssertionError); - __PYX_ERR(1, 722, __pyx_L1_error) - } - } - #endif - - /* "View.MemoryView":724 - * assert memview.view.ndim > 0 - * - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * memviewsliceobj = memview - * p_src = &memviewsliceobj.from_slice - */ - __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":725 - * - * if isinstance(memview, _memoryviewslice): - * memviewsliceobj = memview # <<<<<<<<<<<<<< - * p_src = &memviewsliceobj.from_slice - * else: - */ - if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 725, __pyx_L1_error) - __pyx_t_3 = ((PyObject *)__pyx_v_memview); - __Pyx_INCREF(__pyx_t_3); - __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); - __pyx_t_3 = 0; - - /* "View.MemoryView":726 - * if isinstance(memview, _memoryviewslice): - * memviewsliceobj = memview - * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< - * else: - * slice_copy(memview, &src) - */ - __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); - - /* "View.MemoryView":724 - * assert memview.view.ndim > 0 - * - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * memviewsliceobj = memview - * p_src = &memviewsliceobj.from_slice - */ - goto __pyx_L3; - } - - /* "View.MemoryView":728 - * p_src = &memviewsliceobj.from_slice - * else: - * slice_copy(memview, &src) # <<<<<<<<<<<<<< - * p_src = &src - * - */ - /*else*/ { - __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); - - /* "View.MemoryView":729 - * else: - * slice_copy(memview, &src) - * p_src = &src # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_p_src = (&__pyx_v_src); - } - __pyx_L3:; - - /* "View.MemoryView":735 - * - * - * dst.memview = p_src.memview # <<<<<<<<<<<<<< - * dst.data = p_src.data - * - */ - __pyx_t_4 = __pyx_v_p_src->memview; - __pyx_v_dst.memview = __pyx_t_4; - - /* "View.MemoryView":736 - * - * dst.memview = p_src.memview - * dst.data = p_src.data # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_5 = __pyx_v_p_src->data; - __pyx_v_dst.data = __pyx_t_5; - - /* "View.MemoryView":741 - * - * - * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< - * cdef int *p_suboffset_dim = &suboffset_dim - * cdef Py_ssize_t start, stop, step - */ - __pyx_v_p_dst = (&__pyx_v_dst); - - /* "View.MemoryView":742 - * - * cdef __Pyx_memviewslice *p_dst = &dst - * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< - * cdef Py_ssize_t start, stop, step - * cdef bint have_start, have_stop, have_step - */ - __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); - - /* "View.MemoryView":746 - * cdef bint have_start, have_stop, have_step - * - * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< - * if PyIndex_Check(index): - * slice_memviewslice( - */ - __pyx_t_6 = 0; - if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { - 
__pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; - __pyx_t_8 = NULL; - } else { - __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 746, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 746, __pyx_L1_error) - } - for (;;) { - if (likely(!__pyx_t_8)) { - if (likely(PyList_CheckExact(__pyx_t_3))) { - if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) - #else - __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - #endif - } else { - if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break; - #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 746, __pyx_L1_error) - #else - __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 746, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - #endif - } - } else { - __pyx_t_9 = __pyx_t_8(__pyx_t_3); - if (unlikely(!__pyx_t_9)) { - PyObject* exc_type = PyErr_Occurred(); - if (exc_type) { - if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); - else __PYX_ERR(1, 746, __pyx_L1_error) - } - break; - } - __Pyx_GOTREF(__pyx_t_9); - } - __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); - __pyx_t_9 = 0; - __pyx_v_dim = __pyx_t_6; - __pyx_t_6 = (__pyx_t_6 + 1); - - /* "View.MemoryView":747 - * - * for dim, index in enumerate(indices): - * if PyIndex_Check(index): # <<<<<<<<<<<<<< - * slice_memviewslice( - * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], - */ - __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":751 - * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], - * dim, new_ndim, p_suboffset_dim, - * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< - * 0, 0, 0, # have_{start,stop,step} - * False) - */ - __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 751, __pyx_L1_error) - - /* "View.MemoryView":748 - * for dim, index in enumerate(indices): - * if PyIndex_Check(index): - * slice_memviewslice( # <<<<<<<<<<<<<< - * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], - * dim, new_ndim, p_suboffset_dim, - */ - __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 748, __pyx_L1_error) - - /* "View.MemoryView":747 - * - * for dim, index in enumerate(indices): - * if PyIndex_Check(index): # <<<<<<<<<<<<<< - * slice_memviewslice( - * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], - */ - goto __pyx_L6; - } - - /* "View.MemoryView":754 - * 0, 0, 0, # have_{start,stop,step} - * False) - * elif index is None: # <<<<<<<<<<<<<< - * p_dst.shape[new_ndim] = 1 - * p_dst.strides[new_ndim] = 0 - */ - __pyx_t_2 = (__pyx_v_index == Py_None); - __pyx_t_1 = 
(__pyx_t_2 != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":755 - * False) - * elif index is None: - * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< - * p_dst.strides[new_ndim] = 0 - * p_dst.suboffsets[new_ndim] = -1 - */ - (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; - - /* "View.MemoryView":756 - * elif index is None: - * p_dst.shape[new_ndim] = 1 - * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< - * p_dst.suboffsets[new_ndim] = -1 - * new_ndim += 1 - */ - (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; - - /* "View.MemoryView":757 - * p_dst.shape[new_ndim] = 1 - * p_dst.strides[new_ndim] = 0 - * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< - * new_ndim += 1 - * else: - */ - (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; - - /* "View.MemoryView":758 - * p_dst.strides[new_ndim] = 0 - * p_dst.suboffsets[new_ndim] = -1 - * new_ndim += 1 # <<<<<<<<<<<<<< - * else: - * start = index.start or 0 - */ - __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); - - /* "View.MemoryView":754 - * 0, 0, 0, # have_{start,stop,step} - * False) - * elif index is None: # <<<<<<<<<<<<<< - * p_dst.shape[new_ndim] = 1 - * p_dst.strides[new_ndim] = 0 - */ - goto __pyx_L6; - } - - /* "View.MemoryView":760 - * new_ndim += 1 - * else: - * start = index.start or 0 # <<<<<<<<<<<<<< - * stop = index.stop or 0 - * step = index.step or 0 - */ - /*else*/ { - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 760, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 760, __pyx_L1_error) - if (!__pyx_t_1) { - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - } else { - __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 760, __pyx_L1_error) - __pyx_t_10 = __pyx_t_12; - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - goto __pyx_L7_bool_binop_done; - } - __pyx_t_10 = 0; - __pyx_L7_bool_binop_done:; - __pyx_v_start = __pyx_t_10; - - /* "View.MemoryView":761 - * else: - * start = index.start or 0 - * stop = index.stop or 0 # <<<<<<<<<<<<<< - * step = index.step or 0 - * - */ - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 761, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 761, __pyx_L1_error) - if (!__pyx_t_1) { - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - } else { - __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 761, __pyx_L1_error) - __pyx_t_10 = __pyx_t_12; - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - goto __pyx_L9_bool_binop_done; - } - __pyx_t_10 = 0; - __pyx_L9_bool_binop_done:; - __pyx_v_stop = __pyx_t_10; - - /* "View.MemoryView":762 - * start = index.start or 0 - * stop = index.stop or 0 - * step = index.step or 0 # <<<<<<<<<<<<<< - * - * have_start = index.start is not None - */ - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 762, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 762, __pyx_L1_error) - if (!__pyx_t_1) { - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - } else { - __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 762, __pyx_L1_error) - __pyx_t_10 = __pyx_t_12; - 
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - goto __pyx_L11_bool_binop_done; - } - __pyx_t_10 = 0; - __pyx_L11_bool_binop_done:; - __pyx_v_step = __pyx_t_10; - - /* "View.MemoryView":764 - * step = index.step or 0 - * - * have_start = index.start is not None # <<<<<<<<<<<<<< - * have_stop = index.stop is not None - * have_step = index.step is not None - */ - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 764, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_1 = (__pyx_t_9 != Py_None); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_v_have_start = __pyx_t_1; - - /* "View.MemoryView":765 - * - * have_start = index.start is not None - * have_stop = index.stop is not None # <<<<<<<<<<<<<< - * have_step = index.step is not None - * - */ - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 765, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_1 = (__pyx_t_9 != Py_None); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_v_have_stop = __pyx_t_1; - - /* "View.MemoryView":766 - * have_start = index.start is not None - * have_stop = index.stop is not None - * have_step = index.step is not None # <<<<<<<<<<<<<< - * - * slice_memviewslice( - */ - __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 766, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_9); - __pyx_t_1 = (__pyx_t_9 != Py_None); - __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; - __pyx_v_have_step = __pyx_t_1; - - /* "View.MemoryView":768 - * have_step = index.step is not None - * - * slice_memviewslice( # <<<<<<<<<<<<<< - * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], - * dim, new_ndim, p_suboffset_dim, - */ - __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 768, __pyx_L1_error) - - /* "View.MemoryView":774 - * have_start, have_stop, have_step, - * True) - * new_ndim += 1 # <<<<<<<<<<<<<< - * - * if isinstance(memview, _memoryviewslice): - */ - __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); - } - __pyx_L6:; - - /* "View.MemoryView":746 - * cdef bint have_start, have_stop, have_step - * - * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< - * if PyIndex_Check(index): - * slice_memviewslice( - */ - } - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "View.MemoryView":776 - * new_ndim += 1 - * - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * return memoryview_fromslice(dst, new_ndim, - * memviewsliceobj.to_object_func, - */ - __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":777 - * - * if isinstance(memview, _memoryviewslice): - * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< - * memviewsliceobj.to_object_func, - * memviewsliceobj.to_dtype_func, - */ - __Pyx_XDECREF(((PyObject *)__pyx_r)); - - /* "View.MemoryView":778 - * if isinstance(memview, _memoryviewslice): - * return memoryview_fromslice(dst, new_ndim, - * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< - * memviewsliceobj.to_dtype_func, - * memview.dtype_is_object) - */ - if (unlikely(!__pyx_v_memviewsliceobj)) 
{ __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 778, __pyx_L1_error) } - - /* "View.MemoryView":779 - * return memoryview_fromslice(dst, new_ndim, - * memviewsliceobj.to_object_func, - * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< - * memview.dtype_is_object) - * else: - */ - if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 779, __pyx_L1_error) } - - /* "View.MemoryView":777 - * - * if isinstance(memview, _memoryviewslice): - * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< - * memviewsliceobj.to_object_func, - * memviewsliceobj.to_dtype_func, - */ - __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 777, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 777, __pyx_L1_error) - __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); - __pyx_t_3 = 0; - goto __pyx_L0; - - /* "View.MemoryView":776 - * new_ndim += 1 - * - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * return memoryview_fromslice(dst, new_ndim, - * memviewsliceobj.to_object_func, - */ - } - - /* "View.MemoryView":782 - * memview.dtype_is_object) - * else: - * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< - * memview.dtype_is_object) - * - */ - /*else*/ { - __Pyx_XDECREF(((PyObject *)__pyx_r)); - - /* "View.MemoryView":783 - * else: - * return memoryview_fromslice(dst, new_ndim, NULL, NULL, - * memview.dtype_is_object) # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 782, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - - /* "View.MemoryView":782 - * memview.dtype_is_object) - * else: - * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< - * memview.dtype_is_object) - * - */ - if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 782, __pyx_L1_error) - __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); - __pyx_t_3 = 0; - goto __pyx_L0; - } - - /* "View.MemoryView":710 - * - * @cname('__pyx_memview_slice') - * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< - * cdef int new_ndim = 0, suboffset_dim = -1, dim - * cdef bint negative_step - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_9); - __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); - __Pyx_XDECREF(__pyx_v_index); - __Pyx_XGIVEREF((PyObject *)__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":807 - * - * @cname('__pyx_memoryview_slice_memviewslice') - * cdef int slice_memviewslice( # <<<<<<<<<<<<<< - * __Pyx_memviewslice *dst, - * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, - */ - -static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int 
-                                                          int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step,
-                                                          int __pyx_v_is_slice) {
[Cython-generated body of slice_memviewslice() (View.MemoryView:807-904) removed by this hunk.
For an integer index (is_slice == 0) it wraps a negative start against shape and reports
"Index out of bounds (axis %d)" via _err_dim(IndexError, ...) when the result is still out of
range. For a slice it rejects step == 0 ("Step may not be zero (axis %d)"), clamps
start/stop/step to the axis extent with Python slice semantics, computes
new_shape = (stop - start) // step with a ceiling bump on a nonzero remainder and a clamp at
zero, writes stride * step / new_shape / suboffset into dst, advances dst.data (or the pending
suboffset entry) by start * stride, and raises "All dimensions preceding dimension %d must be
indexed and not sliced" for an integer index that lands on an indirect dimension; returns 0 on
success, -1 after an error.]
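For reference, a pure-Python sketch of the slice branch that slice_memviewslice() implemented.
The clamping rules are the same ones CPython applies to slices, so range() gives an oracle to
check against; normalize_slice is an illustrative name, not part of the patch.

def normalize_slice(start, stop, step, shape,
                    have_start=True, have_stop=True, have_step=True):
    """Clamp start/stop/step against an axis of extent `shape` and return
    (start, stop, step, new_shape), mirroring View.MemoryView:835-881."""
    if have_step and step == 0:
        raise ValueError("Step may not be zero")
    step = step if have_step else 1
    negative_step = step < 0

    if have_start:
        if start < 0:
            start += shape
            if start < 0:
                start = 0
        elif start >= shape:
            start = shape - 1 if negative_step else shape
    else:
        start = shape - 1 if negative_step else 0

    if have_stop:
        if stop < 0:
            stop += shape
            if stop < 0:
                stop = 0
        elif stop > shape:
            stop = shape
    else:
        stop = -1 if negative_step else shape

    # Ceiling division, then clamp at zero: the new extent of the sliced axis.
    new_shape = (stop - start) // step
    if (stop - start) - step * new_shape:
        new_shape += 1
    if new_shape < 0:
        new_shape = 0
    return start, stop, step, new_shape

assert normalize_slice(-3, None, 1, 10, have_stop=False)[3] == len(range(10)[-3:])
assert normalize_slice(None, None, -2, 7, False, False)[3] == len(range(7)[::-2])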
"must be indexed and not sliced", dim) - * else: - */ - /*else*/ { - - /* "View.MemoryView":900 - * else: - * _err_dim(IndexError, "All dimensions preceding dimension %d " - * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< - * else: - * suboffset_dim[0] = new_ndim - */ - __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 899, __pyx_L1_error) - } - __pyx_L26:; - - /* "View.MemoryView":895 - * - * if suboffset >= 0: - * if not is_slice: # <<<<<<<<<<<<<< - * if new_ndim == 0: - * dst.data = ( dst.data)[0] + suboffset - */ - goto __pyx_L25; - } - - /* "View.MemoryView":902 - * "must be indexed and not sliced", dim) - * else: - * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< - * - * return 0 - */ - /*else*/ { - (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; - } - __pyx_L25:; - - /* "View.MemoryView":894 - * dst.suboffsets[suboffset_dim[0]] += start * stride - * - * if suboffset >= 0: # <<<<<<<<<<<<<< - * if not is_slice: - * if new_ndim == 0: - */ - } - - /* "View.MemoryView":904 - * suboffset_dim[0] = new_ndim - * - * return 0 # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = 0; - goto __pyx_L0; - - /* "View.MemoryView":807 - * - * @cname('__pyx_memoryview_slice_memviewslice') - * cdef int slice_memviewslice( # <<<<<<<<<<<<<< - * __Pyx_memviewslice *dst, - * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, - */ - - /* function exit code */ - __pyx_L1_error:; - { - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - } - __pyx_r = -1; - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":910 - * - * @cname('__pyx_pybuffer_index') - * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< - * Py_ssize_t dim) except NULL: - * cdef Py_ssize_t shape, stride, suboffset = -1 - */ - -static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { - Py_ssize_t __pyx_v_shape; - Py_ssize_t __pyx_v_stride; - Py_ssize_t __pyx_v_suboffset; - Py_ssize_t __pyx_v_itemsize; - char *__pyx_v_resultp; - char *__pyx_r; - __Pyx_RefNannyDeclarations - Py_ssize_t __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("pybuffer_index", 0); - - /* "View.MemoryView":912 - * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, - * Py_ssize_t dim) except NULL: - * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< - * cdef Py_ssize_t itemsize = view.itemsize - * cdef char *resultp - */ - __pyx_v_suboffset = -1L; - - /* "View.MemoryView":913 - * Py_ssize_t dim) except NULL: - * cdef Py_ssize_t shape, stride, suboffset = -1 - * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< - * cdef char *resultp - * - */ - __pyx_t_1 = __pyx_v_view->itemsize; - __pyx_v_itemsize = __pyx_t_1; - - /* "View.MemoryView":916 - * cdef char *resultp - * - * if view.ndim == 0: # <<<<<<<<<<<<<< - * shape = view.len / itemsize - * stride = itemsize - */ - __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":917 - * - * if view.ndim == 
0: - * shape = view.len / itemsize # <<<<<<<<<<<<<< - * stride = itemsize - * else: - */ - if (unlikely(__pyx_v_itemsize == 0)) { - PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); - __PYX_ERR(1, 917, __pyx_L1_error) - } - else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { - PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); - __PYX_ERR(1, 917, __pyx_L1_error) - } - __pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize); - - /* "View.MemoryView":918 - * if view.ndim == 0: - * shape = view.len / itemsize - * stride = itemsize # <<<<<<<<<<<<<< - * else: - * shape = view.shape[dim] - */ - __pyx_v_stride = __pyx_v_itemsize; - - /* "View.MemoryView":916 - * cdef char *resultp - * - * if view.ndim == 0: # <<<<<<<<<<<<<< - * shape = view.len / itemsize - * stride = itemsize - */ - goto __pyx_L3; - } - - /* "View.MemoryView":920 - * stride = itemsize - * else: - * shape = view.shape[dim] # <<<<<<<<<<<<<< - * stride = view.strides[dim] - * if view.suboffsets != NULL: - */ - /*else*/ { - __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); - - /* "View.MemoryView":921 - * else: - * shape = view.shape[dim] - * stride = view.strides[dim] # <<<<<<<<<<<<<< - * if view.suboffsets != NULL: - * suboffset = view.suboffsets[dim] - */ - __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); - - /* "View.MemoryView":922 - * shape = view.shape[dim] - * stride = view.strides[dim] - * if view.suboffsets != NULL: # <<<<<<<<<<<<<< - * suboffset = view.suboffsets[dim] - * - */ - __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":923 - * stride = view.strides[dim] - * if view.suboffsets != NULL: - * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< - * - * if index < 0: - */ - __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); - - /* "View.MemoryView":922 - * shape = view.shape[dim] - * stride = view.strides[dim] - * if view.suboffsets != NULL: # <<<<<<<<<<<<<< - * suboffset = view.suboffsets[dim] - * - */ - } - } - __pyx_L3:; - - /* "View.MemoryView":925 - * suboffset = view.suboffsets[dim] - * - * if index < 0: # <<<<<<<<<<<<<< - * index += view.shape[dim] - * if index < 0: - */ - __pyx_t_2 = ((__pyx_v_index < 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":926 - * - * if index < 0: - * index += view.shape[dim] # <<<<<<<<<<<<<< - * if index < 0: - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - */ - __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); - - /* "View.MemoryView":927 - * if index < 0: - * index += view.shape[dim] - * if index < 0: # <<<<<<<<<<<<<< - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - * - */ - __pyx_t_2 = ((__pyx_v_index < 0) != 0); - if (unlikely(__pyx_t_2)) { - - /* "View.MemoryView":928 - * index += view.shape[dim] - * if index < 0: - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< - * - * if index >= shape: - */ - __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 928, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = 
__Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 928, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(1, 928, __pyx_L1_error) - - /* "View.MemoryView":927 - * if index < 0: - * index += view.shape[dim] - * if index < 0: # <<<<<<<<<<<<<< - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - * - */ - } - - /* "View.MemoryView":925 - * suboffset = view.suboffsets[dim] - * - * if index < 0: # <<<<<<<<<<<<<< - * index += view.shape[dim] - * if index < 0: - */ - } - - /* "View.MemoryView":930 - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - * - * if index >= shape: # <<<<<<<<<<<<<< - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - * - */ - __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); - if (unlikely(__pyx_t_2)) { - - /* "View.MemoryView":931 - * - * if index >= shape: - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< - * - * resultp = bufp + index * stride - */ - __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 931, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 931, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(1, 931, __pyx_L1_error) - - /* "View.MemoryView":930 - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - * - * if index >= shape: # <<<<<<<<<<<<<< - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - * - */ - } - - /* "View.MemoryView":933 - * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) - * - * resultp = bufp + index * stride # <<<<<<<<<<<<<< - * if suboffset >= 0: - * resultp = ( resultp)[0] + suboffset - */ - __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); - - /* "View.MemoryView":934 - * - * resultp = bufp + index * stride - * if suboffset >= 0: # <<<<<<<<<<<<<< - * resultp = ( resultp)[0] + suboffset - * - */ - __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":935 - * resultp = bufp + index * stride - * if suboffset >= 0: - * resultp = ( resultp)[0] + suboffset # <<<<<<<<<<<<<< - * - * return resultp - */ - __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); - - /* "View.MemoryView":934 - * - * resultp = bufp + index * stride - * if suboffset >= 0: # <<<<<<<<<<<<<< - * resultp = ( resultp)[0] + suboffset - * - */ - } - - /* "View.MemoryView":937 - * resultp = ( resultp)[0] + suboffset - * - * return resultp # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = __pyx_v_resultp; - goto __pyx_L0; - - /* "View.MemoryView":910 - * - * @cname('__pyx_pybuffer_index') - * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< - * Py_ssize_t dim) except NULL: - * cdef Py_ssize_t shape, stride, suboffset = -1 - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - 
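The pybuffer_index() logic above reduces to stride arithmetic. A minimal sketch, checked
against NumPy's strides; element_offset is an illustrative name and skips the suboffset
indirection.

import numpy as np

def element_offset(shape, stride, index, dim=0):
    """Normalize a (possibly negative) index against one axis and return the
    byte offset bufp is advanced by, as in View.MemoryView:925-933."""
    if index < 0:
        index += shape
        if index < 0:
            raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
    if index >= shape:
        raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
    return index * stride

a = np.arange(12, dtype=np.int32).reshape(3, 4)
# Row -1 wraps to row 2; its offset is 2 * (4 items * 4 bytes) = 32 bytes.
assert element_offset(a.shape[0], a.strides[0], -1) == 32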
__Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":943 - * - * @cname('__pyx_memslice_transpose') - * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< - * cdef int ndim = memslice.memview.view.ndim - * - */ - -static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { - int __pyx_v_ndim; - Py_ssize_t *__pyx_v_shape; - Py_ssize_t *__pyx_v_strides; - int __pyx_v_i; - int __pyx_v_j; - int __pyx_r; - int __pyx_t_1; - Py_ssize_t *__pyx_t_2; - long __pyx_t_3; - long __pyx_t_4; - Py_ssize_t __pyx_t_5; - Py_ssize_t __pyx_t_6; - int __pyx_t_7; - int __pyx_t_8; - int __pyx_t_9; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - - /* "View.MemoryView":944 - * @cname('__pyx_memslice_transpose') - * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: - * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< - * - * cdef Py_ssize_t *shape = memslice.shape - */ - __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; - __pyx_v_ndim = __pyx_t_1; - - /* "View.MemoryView":946 - * cdef int ndim = memslice.memview.view.ndim - * - * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< - * cdef Py_ssize_t *strides = memslice.strides - * - */ - __pyx_t_2 = __pyx_v_memslice->shape; - __pyx_v_shape = __pyx_t_2; - - /* "View.MemoryView":947 - * - * cdef Py_ssize_t *shape = memslice.shape - * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_2 = __pyx_v_memslice->strides; - __pyx_v_strides = __pyx_t_2; - - /* "View.MemoryView":951 - * - * cdef int i, j - * for i in range(ndim / 2): # <<<<<<<<<<<<<< - * j = ndim - 1 - i - * strides[i], strides[j] = strides[j], strides[i] - */ - __pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2); - __pyx_t_4 = __pyx_t_3; - for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) { - __pyx_v_i = __pyx_t_1; - - /* "View.MemoryView":952 - * cdef int i, j - * for i in range(ndim / 2): - * j = ndim - 1 - i # <<<<<<<<<<<<<< - * strides[i], strides[j] = strides[j], strides[i] - * shape[i], shape[j] = shape[j], shape[i] - */ - __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); - - /* "View.MemoryView":953 - * for i in range(ndim / 2): - * j = ndim - 1 - i - * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< - * shape[i], shape[j] = shape[j], shape[i] - * - */ - __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]); - __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]); - (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5; - (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6; - - /* "View.MemoryView":954 - * j = ndim - 1 - i - * strides[i], strides[j] = strides[j], strides[i] - * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< - * - * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: - */ - __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]); - __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]); - (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6; - (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5; - - /* "View.MemoryView":956 - * shape[i], shape[j] = shape[j], shape[i] - * - * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< - * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") - * - */ - __pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); - if (!__pyx_t_8) { - } else { - __pyx_t_7 = __pyx_t_8; - goto __pyx_L6_bool_binop_done; - } - __pyx_t_8 = 
(((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); - __pyx_t_7 = __pyx_t_8; - __pyx_L6_bool_binop_done:; - if (__pyx_t_7) { - - /* "View.MemoryView":957 - * - * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: - * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< - * - * return 1 - */ - __pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 957, __pyx_L1_error) - - /* "View.MemoryView":956 - * shape[i], shape[j] = shape[j], shape[i] - * - * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< - * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") - * - */ - } - } - - /* "View.MemoryView":959 - * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") - * - * return 1 # <<<<<<<<<<<<<< - * - * - */ - __pyx_r = 1; - goto __pyx_L0; - - /* "View.MemoryView":943 - * - * @cname('__pyx_memslice_transpose') - * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< - * cdef int ndim = memslice.memview.view.ndim - * - */ - - /* function exit code */ - __pyx_L1_error:; - { - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - } - __pyx_r = 0; - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":976 - * cdef int (*to_dtype_func)(char *, object) except 0 - * - * def __dealloc__(self): # <<<<<<<<<<<<<< - * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) - * - */ - -/* Python wrapper */ -static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ -static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); - __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__dealloc__", 0); - - /* "View.MemoryView":977 - * - * def __dealloc__(self): - * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< - * - * cdef convert_item_to_object(self, char *itemp): - */ - __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); - - /* "View.MemoryView":976 - * cdef int (*to_dtype_func)(char *, object) except 0 - * - * def __dealloc__(self): # <<<<<<<<<<<<<< - * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) - * - */ - - /* function exit code */ - __Pyx_RefNannyFinishContext(); -} - -/* "View.MemoryView":979 - * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) - * - * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< - * if self.to_object_func != NULL: - * return self.to_object_func(itemp) - */ - -static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - 
__Pyx_RefNannySetupContext("convert_item_to_object", 0); - - /* "View.MemoryView":980 - * - * cdef convert_item_to_object(self, char *itemp): - * if self.to_object_func != NULL: # <<<<<<<<<<<<<< - * return self.to_object_func(itemp) - * else: - */ - __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":981 - * cdef convert_item_to_object(self, char *itemp): - * if self.to_object_func != NULL: - * return self.to_object_func(itemp) # <<<<<<<<<<<<<< - * else: - * return memoryview.convert_item_to_object(self, itemp) - */ - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 981, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - - /* "View.MemoryView":980 - * - * cdef convert_item_to_object(self, char *itemp): - * if self.to_object_func != NULL: # <<<<<<<<<<<<<< - * return self.to_object_func(itemp) - * else: - */ - } - - /* "View.MemoryView":983 - * return self.to_object_func(itemp) - * else: - * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< - * - * cdef assign_item_from_object(self, char *itemp, object value): - */ - /*else*/ { - __Pyx_XDECREF(__pyx_r); - __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 983, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_r = __pyx_t_2; - __pyx_t_2 = 0; - goto __pyx_L0; - } - - /* "View.MemoryView":979 - * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) - * - * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< - * if self.to_object_func != NULL: - * return self.to_object_func(itemp) - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":985 - * return memoryview.convert_item_to_object(self, itemp) - * - * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< - * if self.to_dtype_func != NULL: - * self.to_dtype_func(itemp, value) - */ - -static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("assign_item_from_object", 0); - - /* "View.MemoryView":986 - * - * cdef assign_item_from_object(self, char *itemp, object value): - * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< - * self.to_dtype_func(itemp, value) - * else: - */ - __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":987 - * cdef assign_item_from_object(self, char *itemp, object value): - * if self.to_dtype_func != NULL: - * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< - * else: - * memoryview.assign_item_from_object(self, itemp, value) - */ - __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 987, __pyx_L1_error) - - /* "View.MemoryView":986 - * - * cdef assign_item_from_object(self, char *itemp, object value): - * if 
self.to_dtype_func != NULL: # <<<<<<<<<<<<<< - * self.to_dtype_func(itemp, value) - * else: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":989 - * self.to_dtype_func(itemp, value) - * else: - * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< - * - * @property - */ - /*else*/ { - __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 989, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - } - __pyx_L3:; - - /* "View.MemoryView":985 - * return memoryview.convert_item_to_object(self, itemp) - * - * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< - * if self.to_dtype_func != NULL: - * self.to_dtype_func(itemp, value) - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":992 - * - * @property - * def base(self): # <<<<<<<<<<<<<< - * return self.from_object - * - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ -static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); - __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__get__", 0); - - /* "View.MemoryView":993 - * @property - * def base(self): - * return self.from_object # <<<<<<<<<<<<<< - * - * __pyx_getbuffer = capsule( &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v_self->from_object); - __pyx_r = __pyx_v_self->from_object; - goto __pyx_L0; - - /* "View.MemoryView":992 - * - * @property - * def base(self): # <<<<<<<<<<<<<< - * return self.from_object - * - */ - - /* function exit code */ - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ -static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); - __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject 
*__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__reduce_cython__", 0); - - /* "(tree fragment)":2 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(1, 2, __pyx_L1_error) - - /* "(tree fragment)":1 - * def __reduce_cython__(self): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - -/* Python wrapper */ -static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ -static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); - __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__setstate_cython__", 0); - - /* "(tree fragment)":4 - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - */ - __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_Raise(__pyx_t_1, 0, 0, 0); - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - __PYX_ERR(1, 4, __pyx_L1_error) - - /* "(tree fragment)":3 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - - /* function exit code 
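A sketch of the converter dispatch those two methods perform, with the stdlib struct module
standing in for the generated per-dtype converters; convert_item is an illustrative name.

import struct

def convert_item(itemp, to_object_func=None):
    # Use the dtype-specific converter when the slice was built with one ...
    if to_object_func is not None:
        return to_object_func(itemp)
    # ... otherwise fall back to the base behaviour (raw bytes here).
    return bytes(itemp)

unpack_int = lambda b: struct.unpack("<i", b)[0]
assert convert_item(struct.pack("<i", 7), unpack_int) == 7
assert convert_item(struct.pack("<i", 7)) == b"\x07\x00\x00\x00"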
-/* "View.MemoryView":999
- *
- * @cname('__pyx_memoryview_fromslice')
- * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice,             # <<<<<<<<<<<<<<
- *     int ndim,
- *     object (*to_object_func)(char *),
- */
[Generated body (View.MemoryView:999-1049) removed: returns None when the slice's memview is
Py_None; otherwise allocates a _memoryviewslice, stores the slice (bumping its refcount with
__PYX_INC_MEMVIEW), copies the parent's Py_buffer, points view.buf / view.shape / view.strides
at the slice's own fields, sets flags to PyBUF_RECORDS or PyBUF_RECORDS_RO depending on
writability, re-attaches view.suboffsets only if some dimension is indirect, recomputes
view.len as itemsize times the product of shape[:ndim], and installs the to_object_func /
to_dtype_func converters before returning the result.]
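The view.len recomputation in memoryview_fromslice() is just itemsize times the product of
the exported shape, as a quick check with the stdlib memoryview shows; view_len is an
illustrative name.

from functools import reduce
from operator import mul

def view_len(itemsize, shape):
    return reduce(mul, shape, itemsize)

m = memoryview(bytearray(24)).cast('i', (2, 3))  # 4-byte ints, shape (2, 3)
assert view_len(m.itemsize, m.shape) == m.nbytes == 24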
__pyx_v_result->__pyx_base.view.len = __pyx_t_9; - } - - /* "View.MemoryView":1046 - * result.view.len *= length - * - * result.to_object_func = to_object_func # <<<<<<<<<<<<<< - * result.to_dtype_func = to_dtype_func - * - */ - __pyx_v_result->to_object_func = __pyx_v_to_object_func; - - /* "View.MemoryView":1047 - * - * result.to_object_func = to_object_func - * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< - * - * return result - */ - __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; - - /* "View.MemoryView":1049 - * result.to_dtype_func = to_dtype_func - * - * return result # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_get_slice_from_memoryview') - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(((PyObject *)__pyx_v_result)); - __pyx_r = ((PyObject *)__pyx_v_result); - goto __pyx_L0; - - /* "View.MemoryView":999 - * - * @cname('__pyx_memoryview_fromslice') - * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< - * int ndim, - * object (*to_object_func)(char *), - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_result); - __Pyx_XDECREF(__pyx_v_length); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":1052 - * - * @cname('__pyx_memoryview_get_slice_from_memoryview') - * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< - * __Pyx_memviewslice *mslice) except NULL: - * cdef _memoryviewslice obj - */ - -static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { - struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; - __Pyx_memviewslice *__pyx_r; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - int __pyx_t_2; - PyObject *__pyx_t_3 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("get_slice_from_memview", 0); - - /* "View.MemoryView":1055 - * __Pyx_memviewslice *mslice) except NULL: - * cdef _memoryviewslice obj - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * obj = memview - * return &obj.from_slice - */ - __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); - __pyx_t_2 = (__pyx_t_1 != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1056 - * cdef _memoryviewslice obj - * if isinstance(memview, _memoryviewslice): - * obj = memview # <<<<<<<<<<<<<< - * return &obj.from_slice - * else: - */ - if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1056, __pyx_L1_error) - __pyx_t_3 = ((PyObject *)__pyx_v_memview); - __Pyx_INCREF(__pyx_t_3); - __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); - __pyx_t_3 = 0; - - /* "View.MemoryView":1057 - * if isinstance(memview, _memoryviewslice): - * obj = memview - * return &obj.from_slice # <<<<<<<<<<<<<< - * else: - * slice_copy(memview, mslice) - */ - __pyx_r = (&__pyx_v_obj->from_slice); - goto __pyx_L0; - - /* "View.MemoryView":1055 - * __Pyx_memviewslice *mslice) except NULL: - * cdef _memoryviewslice obj - * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< - * obj = memview - * return &obj.from_slice - */ - } - - /* "View.MemoryView":1059 - * return 
&obj.from_slice - * else: - * slice_copy(memview, mslice) # <<<<<<<<<<<<<< - * return mslice - * - */ - /*else*/ { - __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); - - /* "View.MemoryView":1060 - * else: - * slice_copy(memview, mslice) - * return mslice # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_slice_copy') - */ - __pyx_r = __pyx_v_mslice; - goto __pyx_L0; - } - - /* "View.MemoryView":1052 - * - * @cname('__pyx_memoryview_get_slice_from_memoryview') - * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< - * __Pyx_memviewslice *mslice) except NULL: - * cdef _memoryviewslice obj - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_3); - __Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF((PyObject *)__pyx_v_obj); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":1063 - * - * @cname('__pyx_memoryview_slice_copy') - * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< - * cdef int dim - * cdef (Py_ssize_t*) shape, strides, suboffsets - */ - -static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { - int __pyx_v_dim; - Py_ssize_t *__pyx_v_shape; - Py_ssize_t *__pyx_v_strides; - Py_ssize_t *__pyx_v_suboffsets; - __Pyx_RefNannyDeclarations - Py_ssize_t *__pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - Py_ssize_t __pyx_t_5; - __Pyx_RefNannySetupContext("slice_copy", 0); - - /* "View.MemoryView":1067 - * cdef (Py_ssize_t*) shape, strides, suboffsets - * - * shape = memview.view.shape # <<<<<<<<<<<<<< - * strides = memview.view.strides - * suboffsets = memview.view.suboffsets - */ - __pyx_t_1 = __pyx_v_memview->view.shape; - __pyx_v_shape = __pyx_t_1; - - /* "View.MemoryView":1068 - * - * shape = memview.view.shape - * strides = memview.view.strides # <<<<<<<<<<<<<< - * suboffsets = memview.view.suboffsets - * - */ - __pyx_t_1 = __pyx_v_memview->view.strides; - __pyx_v_strides = __pyx_t_1; - - /* "View.MemoryView":1069 - * shape = memview.view.shape - * strides = memview.view.strides - * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< - * - * dst.memview = <__pyx_memoryview *> memview - */ - __pyx_t_1 = __pyx_v_memview->view.suboffsets; - __pyx_v_suboffsets = __pyx_t_1; - - /* "View.MemoryView":1071 - * suboffsets = memview.view.suboffsets - * - * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< - * dst.data = memview.view.buf - * - */ - __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); - - /* "View.MemoryView":1072 - * - * dst.memview = <__pyx_memoryview *> memview - * dst.data = memview.view.buf # <<<<<<<<<<<<<< - * - * for dim in range(memview.view.ndim): - */ - __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); - - /* "View.MemoryView":1074 - * dst.data = memview.view.buf - * - * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< - * dst.shape[dim] = shape[dim] - * dst.strides[dim] = strides[dim] - */ - __pyx_t_2 = __pyx_v_memview->view.ndim; - __pyx_t_3 = __pyx_t_2; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - __pyx_v_dim = __pyx_t_4; - - /* "View.MemoryView":1075 - * - * for dim in range(memview.view.ndim): - * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< - * dst.strides[dim] = strides[dim] - * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 - */ - (__pyx_v_dst->shape[__pyx_v_dim]) = 
-/* "View.MemoryView":1080
- *
- * @cname('__pyx_memoryview_copy_object')
- * cdef memoryview_copy(memoryview memview):             # <<<<<<<<<<<<<<
- *     "Create a new memoryview object"
- *     cdef __Pyx_memviewslice memviewslice
- */
[Generated body (View.MemoryView:1080-1084) removed: snapshots the view with slice_copy() and
delegates to memoryview_copy_from_slice().]
-
-/* "View.MemoryView":1087
- *
- * @cname('__pyx_memoryview_copy_object_from_slice')
- * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice):             # <<<<<<<<<<<<<<
- *     """
- *     Create a new memoryview object from a given memoryview object and slice.
- */
[Generated body (View.MemoryView:1087-1103) removed: picks up to_object_func / to_dtype_func
from a _memoryviewslice source (NULL otherwise) and rebuilds the object through
memoryview_fromslice(memviewslice[0], memview.view.ndim, to_object_func, to_dtype_func,
memview.dtype_is_object).]
object and slice. - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "View.MemoryView":1109 - * - * - * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< - * if arg < 0: - * return -arg - */ - -static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { - Py_ssize_t __pyx_r; - int __pyx_t_1; - - /* "View.MemoryView":1110 - * - * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: - * if arg < 0: # <<<<<<<<<<<<<< - * return -arg - * else: - */ - __pyx_t_1 = ((__pyx_v_arg < 0) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":1111 - * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: - * if arg < 0: - * return -arg # <<<<<<<<<<<<<< - * else: - * return arg - */ - __pyx_r = (-__pyx_v_arg); - goto __pyx_L0; - - /* "View.MemoryView":1110 - * - * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: - * if arg < 0: # <<<<<<<<<<<<<< - * return -arg - * else: - */ - } - - /* "View.MemoryView":1113 - * return -arg - * else: - * return arg # <<<<<<<<<<<<<< - * - * @cname('__pyx_get_best_slice_order') - */ - /*else*/ { - __pyx_r = __pyx_v_arg; - goto __pyx_L0; - } - - /* "View.MemoryView":1109 - * - * - * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< - * if arg < 0: - * return -arg - */ - - /* function exit code */ - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":1116 - * - * @cname('__pyx_get_best_slice_order') - * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< - * """ - * Figure out the best memory access order for a given slice. 
[Omitted: generated C for get_best_order, which scans a slice's strides to
decide whether 'C' or 'F' memory order suits it best, and for
_copy_strided_to_strided, the recursive element copy between two strided
buffers with a memcpy fast path for packed 1-D data.]
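For reference, here is a rough Python rendering of what those two deleted
helpers do, following the Cython source lines quoted in the generated
comments. This is an illustrative sketch over byte buffers (non-negative
byte strides and zero-based offsets assumed), not NeMo or CPython API.

def get_best_order(shape, strides):
    # Pick 'C' vs 'F' by comparing the strides of the innermost and
    # outermost dimensions whose extent is > 1 (size-1 dims are skipped);
    # a C layout has its smallest stride last, a Fortran layout first.
    c_stride = f_stride = 0
    for i in range(len(shape) - 1, -1, -1):   # last non-trivial dim
        if shape[i] > 1:
            c_stride = strides[i]
            break
    for i in range(len(shape)):               # first non-trivial dim
        if shape[i] > 1:
            f_stride = strides[i]
            break
    return 'C' if abs(c_stride) <= abs(f_stride) else 'F'

def copy_strided(src, src_strides, dst, dst_strides, shape, itemsize,
                 s_off=0, d_off=0):
    # Recursive strided copy: peel the leading dimension; in the 1-D base
    # case do one bulk copy when both sides are packed, else go item by item.
    if len(shape) == 1:
        if src_strides[0] == itemsize == dst_strides[0]:
            n = itemsize * shape[0]
            dst[d_off:d_off + n] = src[s_off:s_off + n]
            return
        for _ in range(shape[0]):
            dst[d_off:d_off + itemsize] = src[s_off:s_off + itemsize]
            s_off += src_strides[0]
            d_off += dst_strides[0]
    else:
        for _ in range(shape[0]):
            copy_strided(src, src_strides[1:], dst, dst_strides[1:],
                         shape[1:], itemsize, s_off, d_off)
            s_off += src_strides[0]
            d_off += dst_strides[0]

For a (2, 3) float64 slice with C strides (24, 8), get_best_order returns
'C', since the innermost stride (8) is the smaller of the two.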
/* "View.MemoryView":1140 - * - * @cython.cdivision(True) - * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< - * char *dst_data, Py_ssize_t *dst_strides, - * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, - */ - - /* function exit code */ -} - -/* "View.MemoryView":1170 - * dst_data += dst_stride - * - * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< - * __Pyx_memviewslice *dst, - * int ndim, size_t itemsize) nogil: - */ - -static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { - - /* "View.MemoryView":1173 - * __Pyx_memviewslice *dst, - * int ndim, size_t itemsize) nogil: - * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< - * src.shape, dst.shape, ndim, itemsize) - * - */ - _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); - - /* "View.MemoryView":1170 - * dst_data += dst_stride - * - * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< - * __Pyx_memviewslice *dst, - * int ndim, size_t itemsize) nogil: - */ - - /* function exit code */ -} - -/* "View.MemoryView":1177 - * - * @cname('__pyx_memoryview_slice_get_size') - * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< - * "Return the size of the memory occupied by the slice in number of bytes" - * cdef Py_ssize_t shape, size = src.memview.view.itemsize - */ - -static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { - Py_ssize_t __pyx_v_shape; - Py_ssize_t __pyx_v_size; - Py_ssize_t __pyx_r; - Py_ssize_t __pyx_t_1; - Py_ssize_t *__pyx_t_2; - Py_ssize_t *__pyx_t_3; - Py_ssize_t *__pyx_t_4; - - /* "View.MemoryView":1179 - * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: - * "Return the size of the memory occupied by the slice in number of bytes" - * cdef Py_ssize_t shape, size = src.memview.view.itemsize # <<<<<<<<<<<<<< - * - * for shape in src.shape[:ndim]: - */ - __pyx_t_1 = __pyx_v_src->memview->view.itemsize; - __pyx_v_size = __pyx_t_1; - - /* "View.MemoryView":1181 - * cdef Py_ssize_t shape, size = src.memview.view.itemsize - * - * for shape in src.shape[:ndim]: # <<<<<<<<<<<<<< - * size *= shape - * - */ - __pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim); - for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { - __pyx_t_2 = __pyx_t_4; - __pyx_v_shape = (__pyx_t_2[0]); - - /* "View.MemoryView":1182 - * - * for shape in src.shape[:ndim]: - * size *= shape # <<<<<<<<<<<<<< - * - * return size - */ - __pyx_v_size = (__pyx_v_size * __pyx_v_shape); - } - - /* "View.MemoryView":1184 - * size *= shape - * - * return size # <<<<<<<<<<<<<< - * - * @cname('__pyx_fill_contig_strides_array') - */ - __pyx_r = __pyx_v_size; - goto __pyx_L0; - - /* "View.MemoryView":1177 - * - * @cname('__pyx_memoryview_slice_get_size') - * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< - * "Return the size of the memory occupied by the slice in number of bytes" - * cdef Py_ssize_t shape, size = src.memview.view.itemsize - */ - - /* function exit code */ - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":1187 - * - * @cname('__pyx_fill_contig_strides_array') - * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< - * 
Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, - * int ndim, char order) nogil: - */ - -static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { - int __pyx_v_idx; - Py_ssize_t __pyx_r; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - int __pyx_t_4; - - /* "View.MemoryView":1196 - * cdef int idx - * - * if order == 'F': # <<<<<<<<<<<<<< - * for idx in range(ndim): - * strides[idx] = stride - */ - __pyx_t_1 = ((__pyx_v_order == 'F') != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":1197 - * - * if order == 'F': - * for idx in range(ndim): # <<<<<<<<<<<<<< - * strides[idx] = stride - * stride *= shape[idx] - */ - __pyx_t_2 = __pyx_v_ndim; - __pyx_t_3 = __pyx_t_2; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - __pyx_v_idx = __pyx_t_4; - - /* "View.MemoryView":1198 - * if order == 'F': - * for idx in range(ndim): - * strides[idx] = stride # <<<<<<<<<<<<<< - * stride *= shape[idx] - * else: - */ - (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; - - /* "View.MemoryView":1199 - * for idx in range(ndim): - * strides[idx] = stride - * stride *= shape[idx] # <<<<<<<<<<<<<< - * else: - * for idx in range(ndim - 1, -1, -1): - */ - __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); - } - - /* "View.MemoryView":1196 - * cdef int idx - * - * if order == 'F': # <<<<<<<<<<<<<< - * for idx in range(ndim): - * strides[idx] = stride - */ - goto __pyx_L3; - } - - /* "View.MemoryView":1201 - * stride *= shape[idx] - * else: - * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< - * strides[idx] = stride - * stride *= shape[idx] - */ - /*else*/ { - for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) { - __pyx_v_idx = __pyx_t_2; - - /* "View.MemoryView":1202 - * else: - * for idx in range(ndim - 1, -1, -1): - * strides[idx] = stride # <<<<<<<<<<<<<< - * stride *= shape[idx] - * - */ - (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; - - /* "View.MemoryView":1203 - * for idx in range(ndim - 1, -1, -1): - * strides[idx] = stride - * stride *= shape[idx] # <<<<<<<<<<<<<< - * - * return stride - */ - __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); - } - } - __pyx_L3:; - - /* "View.MemoryView":1205 - * stride *= shape[idx] - * - * return stride # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_copy_data_to_temp') - */ - __pyx_r = __pyx_v_stride; - goto __pyx_L0; - - /* "View.MemoryView":1187 - * - * @cname('__pyx_fill_contig_strides_array') - * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< - * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, - * int ndim, char order) nogil: - */ - - /* function exit code */ - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":1208 - * - * @cname('__pyx_memoryview_copy_data_to_temp') - * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< - * __Pyx_memviewslice *tmpslice, - * char order, - */ - -static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { - int __pyx_v_i; - void *__pyx_v_result; - size_t __pyx_v_itemsize; - size_t __pyx_v_size; - void *__pyx_r; - Py_ssize_t __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - struct __pyx_memoryview_obj *__pyx_t_4; - int __pyx_t_5; - int __pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - - /* "View.MemoryView":1219 - * cdef void *result - * - * cdef 
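A minimal Python sketch of the two size/stride helpers just summarized,
again following the inlined Cython source; names are illustrative.

def slice_get_size(shape, itemsize):
    # Bytes occupied by the slice: itemsize times the product of all extents.
    size = itemsize
    for extent in shape:
        size *= extent
    return size

def fill_contig_strides_array(shape, stride, order):
    # Build contiguous strides for the given order: 'F' accumulates the
    # running stride from the first dimension, 'C' from the last.
    strides = [0] * len(shape)
    dims = range(len(shape)) if order == 'F' else range(len(shape) - 1, -1, -1)
    for idx in dims:
        strides[idx] = stride
        stride *= shape[idx]
    return strides, stride   # final value is the slice's total size in bytes

For example, fill_contig_strides_array([2, 3], 8, 'C') returns ([24, 8], 48):
C-contiguous strides for a 2x3 array of 8-byte items, occupying 48 bytes.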
[Omitted: generated C for the body of copy_data_to_temp: it mallocs
slice_get_size(src, ndim) bytes (raising MemoryError via _err on failure),
copies the source shape into the temporary slice, fills contiguous strides for
the requested order, zeroes the stride of every extent-1 dimension, then
either memcpys the whole buffer when the source is already contiguous in that
order or falls back to copy_strided_to_strided. The span ends at the opening
of _err_extents.]
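A sketch of that staging step, reusing the slice_get_size,
fill_contig_strides_array and copy_strided sketches above. The equality test
is a simplified stand-in for the generated slice_is_contig check and assumes
the buffer starts at the slice's first element; treat it as illustrative.

def copy_data_to_temp(src_buf, shape, strides, itemsize, order):
    # Make a contiguous scratch copy of a (possibly strided) source slice.
    size = slice_get_size(shape, itemsize)
    tmp = bytearray(size)                      # stands in for malloc
    tmp_strides, _ = fill_contig_strides_array(shape, itemsize, order)
    tmp_strides = [0 if extent == 1 else s     # stride 0 on size-1 dims
                   for extent, s in zip(shape, tmp_strides)]
    if list(strides) == tmp_strides:           # simplified contiguity check
        tmp[:size] = src_buf[:size]            # bulk copy fast path
    else:
        copy_strided(src_buf, list(strides), tmp, tmp_strides, shape, itemsize)
    return tmp, tmp_strides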
[Omitted: generated C for the GIL-acquiring error helpers _err_extents (raises
ValueError("got differing extents in dimension %d (got %d and %d)")), _err_dim
(raises error(msg.decode('ascii') % dim)) and _err (raises error(msg) when a
message is given, else the bare error), plus the declarations opening
memoryview_copy_contents.]
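The three error helpers reduce to a few lines of Python; msg arrives as a C
byte string in the original, hence the decodes.

def _err_extents(i, extent1, extent2):
    raise ValueError("got differing extents in dimension %d (got %d and %d)"
                     % (i, extent1, extent2))

def _err_dim(error, msg, dim):
    raise error(msg.decode('ascii') % dim)

def _err(error, msg=None):
    if msg is not None:
        raise error(msg.decode('ascii'))
    raise error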
- * """ - * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< - * cdef size_t itemsize = src.memview.view.itemsize - * cdef int i - */ - __pyx_v_tmpdata = NULL; - - /* "View.MemoryView":1277 - * """ - * cdef void *tmpdata = NULL - * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< - * cdef int i - * cdef char order = get_best_order(&src, src_ndim) - */ - __pyx_t_1 = __pyx_v_src.memview->view.itemsize; - __pyx_v_itemsize = __pyx_t_1; - - /* "View.MemoryView":1279 - * cdef size_t itemsize = src.memview.view.itemsize - * cdef int i - * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< - * cdef bint broadcasting = False - * cdef bint direct_copy = False - */ - __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); - - /* "View.MemoryView":1280 - * cdef int i - * cdef char order = get_best_order(&src, src_ndim) - * cdef bint broadcasting = False # <<<<<<<<<<<<<< - * cdef bint direct_copy = False - * cdef __Pyx_memviewslice tmp - */ - __pyx_v_broadcasting = 0; - - /* "View.MemoryView":1281 - * cdef char order = get_best_order(&src, src_ndim) - * cdef bint broadcasting = False - * cdef bint direct_copy = False # <<<<<<<<<<<<<< - * cdef __Pyx_memviewslice tmp - * - */ - __pyx_v_direct_copy = 0; - - /* "View.MemoryView":1284 - * cdef __Pyx_memviewslice tmp - * - * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< - * broadcast_leading(&src, src_ndim, dst_ndim) - * elif dst_ndim < src_ndim: - */ - __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1285 - * - * if src_ndim < dst_ndim: - * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< - * elif dst_ndim < src_ndim: - * broadcast_leading(&dst, dst_ndim, src_ndim) - */ - __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); - - /* "View.MemoryView":1284 - * cdef __Pyx_memviewslice tmp - * - * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< - * broadcast_leading(&src, src_ndim, dst_ndim) - * elif dst_ndim < src_ndim: - */ - goto __pyx_L3; - } - - /* "View.MemoryView":1286 - * if src_ndim < dst_ndim: - * broadcast_leading(&src, src_ndim, dst_ndim) - * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< - * broadcast_leading(&dst, dst_ndim, src_ndim) - * - */ - __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1287 - * broadcast_leading(&src, src_ndim, dst_ndim) - * elif dst_ndim < src_ndim: - * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< - * - * cdef int ndim = max(src_ndim, dst_ndim) - */ - __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); - - /* "View.MemoryView":1286 - * if src_ndim < dst_ndim: - * broadcast_leading(&src, src_ndim, dst_ndim) - * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< - * broadcast_leading(&dst, dst_ndim, src_ndim) - * - */ - } - __pyx_L3:; - - /* "View.MemoryView":1289 - * broadcast_leading(&dst, dst_ndim, src_ndim) - * - * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< - * - * for i in range(ndim): - */ - __pyx_t_3 = __pyx_v_dst_ndim; - __pyx_t_4 = __pyx_v_src_ndim; - if (((__pyx_t_3 > __pyx_t_4) != 0)) { - __pyx_t_5 = __pyx_t_3; - } else { - __pyx_t_5 = __pyx_t_4; - } - __pyx_v_ndim = __pyx_t_5; - - /* "View.MemoryView":1291 - * cdef int ndim = max(src_ndim, dst_ndim) - * - * for i in range(ndim): # <<<<<<<<<<<<<< - * if src.shape[i] != dst.shape[i]: - * if src.shape[i] == 1: - */ - __pyx_t_5 = __pyx_v_ndim; - __pyx_t_3 = __pyx_t_5; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - 
__pyx_v_i = __pyx_t_4; - - /* "View.MemoryView":1292 - * - * for i in range(ndim): - * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< - * if src.shape[i] == 1: - * broadcasting = True - */ - __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1293 - * for i in range(ndim): - * if src.shape[i] != dst.shape[i]: - * if src.shape[i] == 1: # <<<<<<<<<<<<<< - * broadcasting = True - * src.strides[i] = 0 - */ - __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1294 - * if src.shape[i] != dst.shape[i]: - * if src.shape[i] == 1: - * broadcasting = True # <<<<<<<<<<<<<< - * src.strides[i] = 0 - * else: - */ - __pyx_v_broadcasting = 1; - - /* "View.MemoryView":1295 - * if src.shape[i] == 1: - * broadcasting = True - * src.strides[i] = 0 # <<<<<<<<<<<<<< - * else: - * _err_extents(i, dst.shape[i], src.shape[i]) - */ - (__pyx_v_src.strides[__pyx_v_i]) = 0; - - /* "View.MemoryView":1293 - * for i in range(ndim): - * if src.shape[i] != dst.shape[i]: - * if src.shape[i] == 1: # <<<<<<<<<<<<<< - * broadcasting = True - * src.strides[i] = 0 - */ - goto __pyx_L7; - } - - /* "View.MemoryView":1297 - * src.strides[i] = 0 - * else: - * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< - * - * if src.suboffsets[i] >= 0: - */ - /*else*/ { - __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error) - } - __pyx_L7:; - - /* "View.MemoryView":1292 - * - * for i in range(ndim): - * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< - * if src.shape[i] == 1: - * broadcasting = True - */ - } - - /* "View.MemoryView":1299 - * _err_extents(i, dst.shape[i], src.shape[i]) - * - * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< - * _err_dim(ValueError, "Dimension %d is not direct", i) - * - */ - __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1300 - * - * if src.suboffsets[i] >= 0: - * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< - * - * if slices_overlap(&src, &dst, ndim, itemsize): - */ - __pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1300, __pyx_L1_error) - - /* "View.MemoryView":1299 - * _err_extents(i, dst.shape[i], src.shape[i]) - * - * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< - * _err_dim(ValueError, "Dimension %d is not direct", i) - * - */ - } - } - - /* "View.MemoryView":1302 - * _err_dim(ValueError, "Dimension %d is not direct", i) - * - * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< - * - * if not slice_is_contig(src, order, ndim): - */ - __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1304 - * if slices_overlap(&src, &dst, ndim, itemsize): - * - * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< - * order = get_best_order(&dst, ndim) - * - */ - __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1305 - * - * if not slice_is_contig(src, order, ndim): - * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< - * - * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) - */ - __pyx_v_order = 
__pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); - - /* "View.MemoryView":1304 - * if slices_overlap(&src, &dst, ndim, itemsize): - * - * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< - * order = get_best_order(&dst, ndim) - * - */ - } - - /* "View.MemoryView":1307 - * order = get_best_order(&dst, ndim) - * - * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< - * src = tmp - * - */ - __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(1, 1307, __pyx_L1_error) - __pyx_v_tmpdata = __pyx_t_7; - - /* "View.MemoryView":1308 - * - * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) - * src = tmp # <<<<<<<<<<<<<< - * - * if not broadcasting: - */ - __pyx_v_src = __pyx_v_tmp; - - /* "View.MemoryView":1302 - * _err_dim(ValueError, "Dimension %d is not direct", i) - * - * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< - * - * if not slice_is_contig(src, order, ndim): - */ - } - - /* "View.MemoryView":1310 - * src = tmp - * - * if not broadcasting: # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1313 - * - * - * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< - * direct_copy = slice_is_contig(dst, 'C', ndim) - * elif slice_is_contig(src, 'F', ndim): - */ - __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1314 - * - * if slice_is_contig(src, 'C', ndim): - * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< - * elif slice_is_contig(src, 'F', ndim): - * direct_copy = slice_is_contig(dst, 'F', ndim) - */ - __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); - - /* "View.MemoryView":1313 - * - * - * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< - * direct_copy = slice_is_contig(dst, 'C', ndim) - * elif slice_is_contig(src, 'F', ndim): - */ - goto __pyx_L12; - } - - /* "View.MemoryView":1315 - * if slice_is_contig(src, 'C', ndim): - * direct_copy = slice_is_contig(dst, 'C', ndim) - * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< - * direct_copy = slice_is_contig(dst, 'F', ndim) - * - */ - __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1316 - * direct_copy = slice_is_contig(dst, 'C', ndim) - * elif slice_is_contig(src, 'F', ndim): - * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< - * - * if direct_copy: - */ - __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); - - /* "View.MemoryView":1315 - * if slice_is_contig(src, 'C', ndim): - * direct_copy = slice_is_contig(dst, 'C', ndim) - * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< - * direct_copy = slice_is_contig(dst, 'F', ndim) - * - */ - } - __pyx_L12:; - - /* "View.MemoryView":1318 - * direct_copy = slice_is_contig(dst, 'F', ndim) - * - * if direct_copy: # <<<<<<<<<<<<<< - * - * refcount_copying(&dst, dtype_is_object, ndim, False) - */ - __pyx_t_2 = (__pyx_v_direct_copy != 0); - if (__pyx_t_2) { - - /* "View.MemoryView":1320 - * if direct_copy: - * - * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< - * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) - * refcount_copying(&dst, dtype_is_object, ndim, True) - */ - __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); - - 
/* "View.MemoryView":1321 - * - * refcount_copying(&dst, dtype_is_object, ndim, False) - * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< - * refcount_copying(&dst, dtype_is_object, ndim, True) - * free(tmpdata) - */ - (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim))); - - /* "View.MemoryView":1322 - * refcount_copying(&dst, dtype_is_object, ndim, False) - * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) - * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< - * free(tmpdata) - * return 0 - */ - __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); - - /* "View.MemoryView":1323 - * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) - * refcount_copying(&dst, dtype_is_object, ndim, True) - * free(tmpdata) # <<<<<<<<<<<<<< - * return 0 - * - */ - free(__pyx_v_tmpdata); - - /* "View.MemoryView":1324 - * refcount_copying(&dst, dtype_is_object, ndim, True) - * free(tmpdata) - * return 0 # <<<<<<<<<<<<<< - * - * if order == 'F' == get_best_order(&dst, ndim): - */ - __pyx_r = 0; - goto __pyx_L0; - - /* "View.MemoryView":1318 - * direct_copy = slice_is_contig(dst, 'F', ndim) - * - * if direct_copy: # <<<<<<<<<<<<<< - * - * refcount_copying(&dst, dtype_is_object, ndim, False) - */ - } - - /* "View.MemoryView":1310 - * src = tmp - * - * if not broadcasting: # <<<<<<<<<<<<<< - * - * - */ - } - - /* "View.MemoryView":1326 - * return 0 - * - * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_2 = (__pyx_v_order == 'F'); - if (__pyx_t_2) { - __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); - } - __pyx_t_8 = (__pyx_t_2 != 0); - if (__pyx_t_8) { - - /* "View.MemoryView":1329 - * - * - * transpose_memslice(&src) # <<<<<<<<<<<<<< - * transpose_memslice(&dst) - * - */ - __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1329, __pyx_L1_error) - - /* "View.MemoryView":1330 - * - * transpose_memslice(&src) - * transpose_memslice(&dst) # <<<<<<<<<<<<<< - * - * refcount_copying(&dst, dtype_is_object, ndim, False) - */ - __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1330, __pyx_L1_error) - - /* "View.MemoryView":1326 - * return 0 - * - * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< - * - * - */ - } - - /* "View.MemoryView":1332 - * transpose_memslice(&dst) - * - * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< - * copy_strided_to_strided(&src, &dst, ndim, itemsize) - * refcount_copying(&dst, dtype_is_object, ndim, True) - */ - __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); - - /* "View.MemoryView":1333 - * - * refcount_copying(&dst, dtype_is_object, ndim, False) - * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< - * refcount_copying(&dst, dtype_is_object, ndim, True) - * - */ - copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); - - /* "View.MemoryView":1334 - * refcount_copying(&dst, dtype_is_object, ndim, False) - * copy_strided_to_strided(&src, &dst, ndim, itemsize) - * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< - * - * free(tmpdata) - */ - __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); - - /* "View.MemoryView":1336 - * refcount_copying(&dst, dtype_is_object, ndim, 
True) - * - * free(tmpdata) # <<<<<<<<<<<<<< - * return 0 - * - */ - free(__pyx_v_tmpdata); - - /* "View.MemoryView":1337 - * - * free(tmpdata) - * return 0 # <<<<<<<<<<<<<< - * - * @cname('__pyx_memoryview_broadcast_leading') - */ - __pyx_r = 0; - goto __pyx_L0; - - /* "View.MemoryView":1268 - * - * @cname('__pyx_memoryview_copy_contents') - * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< - * __Pyx_memviewslice dst, - * int src_ndim, int dst_ndim, - */ - - /* function exit code */ - __pyx_L1_error:; - { - #ifdef WITH_THREAD - PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); - #endif - __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); - #ifdef WITH_THREAD - __Pyx_PyGILState_Release(__pyx_gilstate_save); - #endif - } - __pyx_r = -1; - __pyx_L0:; - return __pyx_r; -} - -/* "View.MemoryView":1340 - * - * @cname('__pyx_memoryview_broadcast_leading') - * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< - * int ndim, - * int ndim_other) nogil: - */ - -static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { - int __pyx_v_i; - int __pyx_v_offset; - int __pyx_t_1; - int __pyx_t_2; - int __pyx_t_3; - - /* "View.MemoryView":1344 - * int ndim_other) nogil: - * cdef int i - * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< - * - * for i in range(ndim - 1, -1, -1): - */ - __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); - - /* "View.MemoryView":1346 - * cdef int offset = ndim_other - ndim - * - * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< - * mslice.shape[i + offset] = mslice.shape[i] - * mslice.strides[i + offset] = mslice.strides[i] - */ - for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { - __pyx_v_i = __pyx_t_1; - - /* "View.MemoryView":1347 - * - * for i in range(ndim - 1, -1, -1): - * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< - * mslice.strides[i + offset] = mslice.strides[i] - * mslice.suboffsets[i + offset] = mslice.suboffsets[i] - */ - (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); - - /* "View.MemoryView":1348 - * for i in range(ndim - 1, -1, -1): - * mslice.shape[i + offset] = mslice.shape[i] - * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< - * mslice.suboffsets[i + offset] = mslice.suboffsets[i] - * - */ - (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); - - /* "View.MemoryView":1349 - * mslice.shape[i + offset] = mslice.shape[i] - * mslice.strides[i + offset] = mslice.strides[i] - * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< - * - * for i in range(offset): - */ - (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); - } - - /* "View.MemoryView":1351 - * mslice.suboffsets[i + offset] = mslice.suboffsets[i] - * - * for i in range(offset): # <<<<<<<<<<<<<< - * mslice.shape[i] = 1 - * mslice.strides[i] = mslice.strides[0] - */ - __pyx_t_1 = __pyx_v_offset; - __pyx_t_2 = __pyx_t_1; - for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { - __pyx_v_i = __pyx_t_3; - - /* "View.MemoryView":1352 - * - * for i in range(offset): - * mslice.shape[i] = 1 # <<<<<<<<<<<<<< - * mslice.strides[i] = mslice.strides[0] - * mslice.suboffsets[i] = -1 - */ - (__pyx_v_mslice->shape[__pyx_v_i]) = 1; - - /* "View.MemoryView":1353 - * for i in range(offset): - * mslice.shape[i] = 1 
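Broadcast padding itself is small; a Python sketch over plain lists (the
generated C does the same shifts inside fixed-size arrays):

def broadcast_leading(shape, strides, suboffsets, ndim_other):
    # Left-pad the slice to rank ndim_other: existing dims shift toward the
    # end; each new leading dim gets extent 1, a copy of the old strides[0]
    # (irrelevant, since its extent is 1) and suboffset -1 (direct).
    offset = ndim_other - len(shape)
    lead_stride = strides[0]
    shape[:0] = [1] * offset
    strides[:0] = [lead_stride] * offset
    suboffsets[:0] = [-1] * offset

Broadcasting a rank-2 slice with shape [3, 4] up to rank 4 this way yields
shape [1, 1, 3, 4], so it can be copied element-wise against any
[n, m, 3, 4] destination.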
[Omitted: generated C for the tail of broadcast_leading, for refcount_copying
(when the dtype is object, bumps or drops refcounts across the whole slice
before/after a raw copy), for refcount_objects_in_slice_with_gil and
refcount_objects_in_slice (the recursive Py_INCREF/Py_DECREF walk over a
strided slice of PyObject pointers, structured like the strided copy above)
and the opening of slice_assign_scalar.]
dst.strides, ndim, - * itemsize, item) - */ - __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); - - /* "View.MemoryView":1401 - * bint dtype_is_object) nogil: - * refcount_copying(dst, dtype_is_object, ndim, False) - * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< - * itemsize, item) - * refcount_copying(dst, dtype_is_object, ndim, True) - */ - __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); - - /* "View.MemoryView":1403 - * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, - * itemsize, item) - * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< - * - * - */ - __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); - - /* "View.MemoryView":1397 - * - * @cname('__pyx_memoryview_slice_assign_scalar') - * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< - * size_t itemsize, void *item, - * bint dtype_is_object) nogil: - */ - - /* function exit code */ -} - -/* "View.MemoryView":1407 - * - * @cname('__pyx_memoryview__slice_assign_scalar') - * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< - * Py_ssize_t *strides, int ndim, - * size_t itemsize, void *item) nogil: - */ - -static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { - CYTHON_UNUSED Py_ssize_t __pyx_v_i; - Py_ssize_t __pyx_v_stride; - Py_ssize_t __pyx_v_extent; - int __pyx_t_1; - Py_ssize_t __pyx_t_2; - Py_ssize_t __pyx_t_3; - Py_ssize_t __pyx_t_4; - - /* "View.MemoryView":1411 - * size_t itemsize, void *item) nogil: - * cdef Py_ssize_t i - * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< - * cdef Py_ssize_t extent = shape[0] - * - */ - __pyx_v_stride = (__pyx_v_strides[0]); - - /* "View.MemoryView":1412 - * cdef Py_ssize_t i - * cdef Py_ssize_t stride = strides[0] - * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< - * - * if ndim == 1: - */ - __pyx_v_extent = (__pyx_v_shape[0]); - - /* "View.MemoryView":1414 - * cdef Py_ssize_t extent = shape[0] - * - * if ndim == 1: # <<<<<<<<<<<<<< - * for i in range(extent): - * memcpy(data, item, itemsize) - */ - __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); - if (__pyx_t_1) { - - /* "View.MemoryView":1415 - * - * if ndim == 1: - * for i in range(extent): # <<<<<<<<<<<<<< - * memcpy(data, item, itemsize) - * data += stride - */ - __pyx_t_2 = __pyx_v_extent; - __pyx_t_3 = __pyx_t_2; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - __pyx_v_i = __pyx_t_4; - - /* "View.MemoryView":1416 - * if ndim == 1: - * for i in range(extent): - * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< - * data += stride - * else: - */ - (void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize)); - - /* "View.MemoryView":1417 - * for i in range(extent): - * memcpy(data, item, itemsize) - * data += stride # <<<<<<<<<<<<<< - * else: - * for i in range(extent): - */ - __pyx_v_data = (__pyx_v_data + __pyx_v_stride); - } - - /* "View.MemoryView":1414 - * cdef Py_ssize_t extent = shape[0] - * - * if ndim == 1: # <<<<<<<<<<<<<< - * for i in range(extent): - * memcpy(data, item, itemsize) - */ - goto __pyx_L3; - } - - /* "View.MemoryView":1419 - * data += stride - * else: - * for i in range(extent): # <<<<<<<<<<<<<< - * _slice_assign_scalar(data, shape + 1, strides + 1, - * ndim - 1, 
itemsize, item) - */ - /*else*/ { - __pyx_t_2 = __pyx_v_extent; - __pyx_t_3 = __pyx_t_2; - for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { - __pyx_v_i = __pyx_t_4; - - /* "View.MemoryView":1420 - * else: - * for i in range(extent): - * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< - * ndim - 1, itemsize, item) - * data += stride - */ - __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); - - /* "View.MemoryView":1422 - * _slice_assign_scalar(data, shape + 1, strides + 1, - * ndim - 1, itemsize, item) - * data += stride # <<<<<<<<<<<<<< - * - * - */ - __pyx_v_data = (__pyx_v_data + __pyx_v_stride); - } - } - __pyx_L3:; - - /* "View.MemoryView":1407 - * - * @cname('__pyx_memoryview__slice_assign_scalar') - * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< - * Py_ssize_t *strides, int ndim, - * size_t itemsize, void *item) nogil: - */ - - /* function exit code */ -} - -/* "(tree fragment)":1 - * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< - * cdef object __pyx_PickleError - * cdef object __pyx_result - */ - -/* Python wrapper */ -static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ -static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0}; -static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { - PyObject *__pyx_v___pyx_type = 0; - long __pyx_v___pyx_checksum; - PyObject *__pyx_v___pyx_state = 0; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - PyObject *__pyx_r = 0; - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0); - { - static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0}; - PyObject* values[3] = {0,0,0}; - if (unlikely(__pyx_kwds)) { - Py_ssize_t kw_args; - const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); - switch (pos_args) { - case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - CYTHON_FALLTHROUGH; - case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - CYTHON_FALLTHROUGH; - case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - CYTHON_FALLTHROUGH; - case 0: break; - default: goto __pyx_L5_argtuple_error; - } - kw_args = PyDict_Size(__pyx_kwds); - switch (pos_args) { - case 0: - if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--; - else goto __pyx_L5_argtuple_error; - CYTHON_FALLTHROUGH; - case 1: - if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error) - } - CYTHON_FALLTHROUGH; - case 2: - if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--; - else { - __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error) - } - } - if (unlikely(kw_args > 0)) { - if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error) - } - } else if 
(PyTuple_GET_SIZE(__pyx_args) != 3) { - goto __pyx_L5_argtuple_error; - } else { - values[0] = PyTuple_GET_ITEM(__pyx_args, 0); - values[1] = PyTuple_GET_ITEM(__pyx_args, 1); - values[2] = PyTuple_GET_ITEM(__pyx_args, 2); - } - __pyx_v___pyx_type = values[0]; - __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) - __pyx_v___pyx_state = values[2]; - } - goto __pyx_L4_argument_unpacking_done; - __pyx_L5_argtuple_error:; - __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error) - __pyx_L3_error:; - __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); - __Pyx_RefNannyFinishContext(); - return NULL; - __pyx_L4_argument_unpacking_done:; - __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state); - - /* function exit code */ - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_v___pyx_PickleError = 0; - PyObject *__pyx_v___pyx_result = 0; - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - int __pyx_t_1; - PyObject *__pyx_t_2 = NULL; - PyObject *__pyx_t_3 = NULL; - PyObject *__pyx_t_4 = NULL; - PyObject *__pyx_t_5 = NULL; - int __pyx_t_6; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0); - - /* "(tree fragment)":4 - * cdef object __pyx_PickleError - * cdef object __pyx_result - * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< - * from pickle import PickleError as __pyx_PickleError - * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) - */ - __pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0); - if (__pyx_t_1) { - - /* "(tree fragment)":5 - * cdef object __pyx_result - * if __pyx_checksum != 0xb068931: - * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< - * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) - * __pyx_result = Enum.__new__(__pyx_type) - */ - __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(__pyx_n_s_PickleError); - __Pyx_GIVEREF(__pyx_n_s_PickleError); - PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError); - __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __Pyx_INCREF(__pyx_t_2); - __pyx_v___pyx_PickleError = __pyx_t_2; - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "(tree fragment)":6 - * if __pyx_checksum != 0xb068931: - * from pickle import PickleError as __pyx_PickleError - * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<< - * __pyx_result = Enum.__new__(__pyx_type) - * if __pyx_state is not None: - */ - __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_4); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_INCREF(__pyx_v___pyx_PickleError); - __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL; - if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_5)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_5); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - } - } - __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; - __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __Pyx_Raise(__pyx_t_3, 0, 0, 0); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - __PYX_ERR(1, 6, __pyx_L1_error) - - /* "(tree fragment)":4 - * cdef object __pyx_PickleError - * cdef object __pyx_result - * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< - * from pickle import PickleError as __pyx_PickleError - * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) - */ - } - - /* "(tree fragment)":7 - * from pickle import PickleError as __pyx_PickleError - * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) - * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<< - * if __pyx_state is not None: - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) - */ - __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_2); - __pyx_t_4 = NULL; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { - __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2); - if (likely(__pyx_t_4)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); - __Pyx_INCREF(__pyx_t_4); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_2, function); - } - } - __pyx_t_3 = (__pyx_t_4) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type); - __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; - if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; - __pyx_v___pyx_result = __pyx_t_3; - __pyx_t_3 = 0; - - /* "(tree fragment)":8 - * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) - * __pyx_result = Enum.__new__(__pyx_type) - * if __pyx_state is not None: # <<<<<<<<<<<<<< - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) - * return __pyx_result - */ - __pyx_t_1 = (__pyx_v___pyx_state != Py_None); - __pyx_t_6 = (__pyx_t_1 != 0); - if (__pyx_t_6) { - - /* "(tree fragment)":9 - * __pyx_result = Enum.__new__(__pyx_type) - * if __pyx_state is not None: - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) # <<<<<<<<<<<<<< - * return __pyx_result - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): - */ - if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error) - __pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_3); - __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; - - /* "(tree fragment)":8 - * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) - * __pyx_result = Enum.__new__(__pyx_type) - * if __pyx_state is not None: # <<<<<<<<<<<<<< - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) - * return __pyx_result - */ - } - - /* "(tree fragment)":10 - * if __pyx_state is not None: - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) - * return __pyx_result # <<<<<<<<<<<<<< - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): - * __pyx_result.name = __pyx_state[0] - */ - __Pyx_XDECREF(__pyx_r); - __Pyx_INCREF(__pyx_v___pyx_result); - __pyx_r = __pyx_v___pyx_result; - goto __pyx_L0; - - /* "(tree fragment)":1 - * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< - * cdef object __pyx_PickleError - * cdef object __pyx_result - */ - - /* function exit code */ - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_2); - __Pyx_XDECREF(__pyx_t_3); - __Pyx_XDECREF(__pyx_t_4); - __Pyx_XDECREF(__pyx_t_5); - __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = NULL; - __pyx_L0:; - __Pyx_XDECREF(__pyx_v___pyx_PickleError); - __Pyx_XDECREF(__pyx_v___pyx_result); - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} - -/* "(tree fragment)":11 - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) - * return __pyx_result - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< - * __pyx_result.name = __pyx_state[0] - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): - */ - -static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) { - PyObject *__pyx_r = NULL; - __Pyx_RefNannyDeclarations - PyObject *__pyx_t_1 = NULL; - int __pyx_t_2; - Py_ssize_t __pyx_t_3; - int __pyx_t_4; - int __pyx_t_5; - PyObject 
*__pyx_t_6 = NULL; - PyObject *__pyx_t_7 = NULL; - PyObject *__pyx_t_8 = NULL; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0); - - /* "(tree fragment)":12 - * return __pyx_result - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): - * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<< - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): - * __pyx_result.__dict__.update(__pyx_state[1]) - */ - if (unlikely(__pyx_v___pyx_state == Py_None)) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); - __PYX_ERR(1, 12, __pyx_L1_error) - } - __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __Pyx_GOTREF(__pyx_v___pyx_result->name); - __Pyx_DECREF(__pyx_v___pyx_result->name); - __pyx_v___pyx_result->name = __pyx_t_1; - __pyx_t_1 = 0; - - /* "(tree fragment)":13 - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): - * __pyx_result.name = __pyx_state[0] - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< - * __pyx_result.__dict__.update(__pyx_state[1]) - */ - if (unlikely(__pyx_v___pyx_state == Py_None)) { - PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); - __PYX_ERR(1, 13, __pyx_L1_error) - } - __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error) - __pyx_t_4 = ((__pyx_t_3 > 1) != 0); - if (__pyx_t_4) { - } else { - __pyx_t_2 = __pyx_t_4; - goto __pyx_L4_bool_binop_done; - } - __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error) - __pyx_t_5 = (__pyx_t_4 != 0); - __pyx_t_2 = __pyx_t_5; - __pyx_L4_bool_binop_done:; - if (__pyx_t_2) { - - /* "(tree fragment)":14 - * __pyx_result.name = __pyx_state[0] - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): - * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<< - */ - __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_7); - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (unlikely(__pyx_v___pyx_state == Py_None)) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); - __PYX_ERR(1, 14, __pyx_L1_error) - } - __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_6); - __pyx_t_8 = NULL; - if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { - __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); - if (likely(__pyx_t_8)) { - PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); - __Pyx_INCREF(__pyx_t_8); - __Pyx_INCREF(function); - __Pyx_DECREF_SET(__pyx_t_7, function); - } - } - __pyx_t_1 = (__pyx_t_8) ? 
__Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); - __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; - __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; - if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "(tree fragment)":13 - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): - * __pyx_result.name = __pyx_state[0] - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< - * __pyx_result.__dict__.update(__pyx_state[1]) - */ - } - - /* "(tree fragment)":11 - * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) - * return __pyx_result - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< - * __pyx_result.name = __pyx_state[0] - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): - */ - - /* function exit code */ - __pyx_r = Py_None; __Pyx_INCREF(Py_None); - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - __Pyx_XDECREF(__pyx_t_6); - __Pyx_XDECREF(__pyx_t_7); - __Pyx_XDECREF(__pyx_t_8); - __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); - __pyx_r = 0; - __pyx_L0:; - __Pyx_XGIVEREF(__pyx_r); - __Pyx_RefNannyFinishContext(); - return __pyx_r; -} -static struct __pyx_vtabstruct_array __pyx_vtable_array; - -static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { - struct __pyx_array_obj *p; - PyObject *o; - if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { - o = (*t->tp_alloc)(t, 0); - } else { - o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); - } - if (unlikely(!o)) return 0; - p = ((struct __pyx_array_obj *)o); - p->__pyx_vtab = __pyx_vtabptr_array; - p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); - p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); - if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad; - return o; - bad: - Py_DECREF(o); o = 0; - return NULL; -} - -static void __pyx_tp_dealloc_array(PyObject *o) { - struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; - #if CYTHON_USE_TP_FINALIZE - if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { - if (PyObject_CallFinalizerFromDealloc(o)) return; - } - #endif - { - PyObject *etype, *eval, *etb; - PyErr_Fetch(&etype, &eval, &etb); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); - __pyx_array___dealloc__(o); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); - PyErr_Restore(etype, eval, etb); - } - Py_CLEAR(p->mode); - Py_CLEAR(p->_format); - (*Py_TYPE(o)->tp_free)(o); -} -static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { - PyObject *r; - PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; - r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); - Py_DECREF(x); - return r; -} - -static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { - if (v) { - return __pyx_array___setitem__(o, i, v); - } - else { - PyErr_Format(PyExc_NotImplementedError, - "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); - return -1; - } -} - -static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { - PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n); - if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { - PyErr_Clear(); - v = __pyx_array___getattr__(o, n); - } - return v; -} - 
-static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); -} - -static PyMethodDef __pyx_methods_array[] = { - {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, - {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0}, - {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0}, - {0, 0, 0, 0} -}; - -static struct PyGetSetDef __pyx_getsets_array[] = { - {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, - {0, 0, 0, 0, 0} -}; - -static PySequenceMethods __pyx_tp_as_sequence_array = { - __pyx_array___len__, /*sq_length*/ - 0, /*sq_concat*/ - 0, /*sq_repeat*/ - __pyx_sq_item_array, /*sq_item*/ - 0, /*sq_slice*/ - 0, /*sq_ass_item*/ - 0, /*sq_ass_slice*/ - 0, /*sq_contains*/ - 0, /*sq_inplace_concat*/ - 0, /*sq_inplace_repeat*/ -}; - -static PyMappingMethods __pyx_tp_as_mapping_array = { - __pyx_array___len__, /*mp_length*/ - __pyx_array___getitem__, /*mp_subscript*/ - __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ -}; - -static PyBufferProcs __pyx_tp_as_buffer_array = { - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getreadbuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getwritebuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getsegcount*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getcharbuffer*/ - #endif - __pyx_array_getbuffer, /*bf_getbuffer*/ - 0, /*bf_releasebuffer*/ -}; - -static PyTypeObject __pyx_type___pyx_array = { - PyVarObject_HEAD_INIT(0, 0) - "monotonic_align.core.array", /*tp_name*/ - sizeof(struct __pyx_array_obj), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_array, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - 0, /*tp_repr*/ - 0, /*tp_as_number*/ - &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ - &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - __pyx_tp_getattro_array, /*tp_getattro*/ - 0, /*tp_setattro*/ - &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ - 0, /*tp_doc*/ - 0, /*tp_traverse*/ - 0, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - __pyx_methods_array, /*tp_methods*/ - 0, /*tp_members*/ - __pyx_getsets_array, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - 0, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_array, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - 0, /*tp_finalize*/ - #endif - #if PY_VERSION_HEX >= 0x030800b1 - 0, /*tp_vectorcall*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ - #endif -}; - -static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { - struct __pyx_MemviewEnum_obj *p; - PyObject *o; - if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { - o = (*t->tp_alloc)(t, 0); - } else { - o 
= (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); - } - if (unlikely(!o)) return 0; - p = ((struct __pyx_MemviewEnum_obj *)o); - p->name = Py_None; Py_INCREF(Py_None); - return o; -} - -static void __pyx_tp_dealloc_Enum(PyObject *o) { - struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; - #if CYTHON_USE_TP_FINALIZE - if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { - if (PyObject_CallFinalizerFromDealloc(o)) return; - } - #endif - PyObject_GC_UnTrack(o); - Py_CLEAR(p->name); - (*Py_TYPE(o)->tp_free)(o); -} - -static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; - if (p->name) { - e = (*v)(p->name, a); if (e) return e; - } - return 0; -} - -static int __pyx_tp_clear_Enum(PyObject *o) { - PyObject* tmp; - struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; - tmp = ((PyObject*)p->name); - p->name = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - return 0; -} - -static PyMethodDef __pyx_methods_Enum[] = { - {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0}, - {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0}, - {0, 0, 0, 0} -}; - -static PyTypeObject __pyx_type___pyx_MemviewEnum = { - PyVarObject_HEAD_INIT(0, 0) - "monotonic_align.core.Enum", /*tp_name*/ - sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_Enum, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - __pyx_MemviewEnum___repr__, /*tp_repr*/ - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - 0, /*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - 0, /*tp_doc*/ - __pyx_tp_traverse_Enum, /*tp_traverse*/ - __pyx_tp_clear_Enum, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - __pyx_methods_Enum, /*tp_methods*/ - 0, /*tp_members*/ - 0, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - __pyx_MemviewEnum___init__, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_Enum, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - 0, /*tp_finalize*/ - #endif - #if PY_VERSION_HEX >= 0x030800b1 - 0, /*tp_vectorcall*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ - #endif -}; -static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; - -static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { - struct __pyx_memoryview_obj *p; - PyObject *o; - if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { - o = (*t->tp_alloc)(t, 0); - } else { - o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); - } - if (unlikely(!o)) return 0; - p = ((struct 
__pyx_memoryview_obj *)o); - p->__pyx_vtab = __pyx_vtabptr_memoryview; - p->obj = Py_None; Py_INCREF(Py_None); - p->_size = Py_None; Py_INCREF(Py_None); - p->_array_interface = Py_None; Py_INCREF(Py_None); - p->view.obj = NULL; - if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; - return o; - bad: - Py_DECREF(o); o = 0; - return NULL; -} - -static void __pyx_tp_dealloc_memoryview(PyObject *o) { - struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; - #if CYTHON_USE_TP_FINALIZE - if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { - if (PyObject_CallFinalizerFromDealloc(o)) return; - } - #endif - PyObject_GC_UnTrack(o); - { - PyObject *etype, *eval, *etb; - PyErr_Fetch(&etype, &eval, &etb); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); - __pyx_memoryview___dealloc__(o); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); - PyErr_Restore(etype, eval, etb); - } - Py_CLEAR(p->obj); - Py_CLEAR(p->_size); - Py_CLEAR(p->_array_interface); - (*Py_TYPE(o)->tp_free)(o); -} - -static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; - if (p->obj) { - e = (*v)(p->obj, a); if (e) return e; - } - if (p->_size) { - e = (*v)(p->_size, a); if (e) return e; - } - if (p->_array_interface) { - e = (*v)(p->_array_interface, a); if (e) return e; - } - if (p->view.obj) { - e = (*v)(p->view.obj, a); if (e) return e; - } - return 0; -} - -static int __pyx_tp_clear_memoryview(PyObject *o) { - PyObject* tmp; - struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; - tmp = ((PyObject*)p->obj); - p->obj = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - tmp = ((PyObject*)p->_size); - p->_size = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - tmp = ((PyObject*)p->_array_interface); - p->_array_interface = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - Py_CLEAR(p->view.obj); - return 0; -} -static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { - PyObject *r; - PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; - r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); - Py_DECREF(x); - return r; -} - -static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { - if (v) { - return __pyx_memoryview___setitem__(o, i, v); - } - else { - PyErr_Format(PyExc_NotImplementedError, - "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); - return -1; - } -} - -static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); -} - -static 
PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); -} - -static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); -} - -static PyMethodDef __pyx_methods_memoryview[] = { - {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, - {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, - {"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, - {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, - {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0}, - {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0}, - {0, 0, 0, 0} -}; - -static struct PyGetSetDef __pyx_getsets_memoryview[] = { - {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, - {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, - {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, - {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, - {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, - {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, - {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, - {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, - {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, - {0, 0, 0, 0, 0} -}; - -static PySequenceMethods __pyx_tp_as_sequence_memoryview = { - __pyx_memoryview___len__, /*sq_length*/ - 0, /*sq_concat*/ - 0, /*sq_repeat*/ - __pyx_sq_item_memoryview, /*sq_item*/ - 0, /*sq_slice*/ - 0, /*sq_ass_item*/ - 0, /*sq_ass_slice*/ - 0, /*sq_contains*/ - 0, /*sq_inplace_concat*/ - 0, /*sq_inplace_repeat*/ -}; - -static PyMappingMethods __pyx_tp_as_mapping_memoryview = { - __pyx_memoryview___len__, /*mp_length*/ - __pyx_memoryview___getitem__, /*mp_subscript*/ - __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ -}; - -static PyBufferProcs __pyx_tp_as_buffer_memoryview = { - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getreadbuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getwritebuffer*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getsegcount*/ - #endif - #if PY_MAJOR_VERSION < 3 - 0, /*bf_getcharbuffer*/ - #endif - __pyx_memoryview_getbuffer, /*bf_getbuffer*/ - 0, /*bf_releasebuffer*/ -}; - -static PyTypeObject __pyx_type___pyx_memoryview = { - PyVarObject_HEAD_INIT(0, 0) - "monotonic_align.core.memoryview", /*tp_name*/ - sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - __pyx_memoryview___repr__, /*tp_repr*/ - 0, /*tp_as_number*/ - &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ - &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - __pyx_memoryview___str__, 
/*tp_str*/ - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - 0, /*tp_doc*/ - __pyx_tp_traverse_memoryview, /*tp_traverse*/ - __pyx_tp_clear_memoryview, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - __pyx_methods_memoryview, /*tp_methods*/ - 0, /*tp_members*/ - __pyx_getsets_memoryview, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - 0, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new_memoryview, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - 0, /*tp_finalize*/ - #endif - #if PY_VERSION_HEX >= 0x030800b1 - 0, /*tp_vectorcall*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ - #endif -}; -static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; - -static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { - struct __pyx_memoryviewslice_obj *p; - PyObject *o = __pyx_tp_new_memoryview(t, a, k); - if (unlikely(!o)) return 0; - p = ((struct __pyx_memoryviewslice_obj *)o); - p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; - p->from_object = Py_None; Py_INCREF(Py_None); - p->from_slice.memview = NULL; - return o; -} - -static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { - struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; - #if CYTHON_USE_TP_FINALIZE - if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { - if (PyObject_CallFinalizerFromDealloc(o)) return; - } - #endif - PyObject_GC_UnTrack(o); - { - PyObject *etype, *eval, *etb; - PyErr_Fetch(&etype, &eval, &etb); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); - __pyx_memoryviewslice___dealloc__(o); - __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); - PyErr_Restore(etype, eval, etb); - } - Py_CLEAR(p->from_object); - PyObject_GC_Track(o); - __pyx_tp_dealloc_memoryview(o); -} - -static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { - int e; - struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; - e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; - if (p->from_object) { - e = (*v)(p->from_object, a); if (e) return e; - } - return 0; -} - -static int __pyx_tp_clear__memoryviewslice(PyObject *o) { - PyObject* tmp; - struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; - __pyx_tp_clear_memoryview(o); - tmp = ((PyObject*)p->from_object); - p->from_object = Py_None; Py_INCREF(Py_None); - Py_XDECREF(tmp); - __PYX_XDEC_MEMVIEW(&p->from_slice, 1); - return 0; -} - -static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { - return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); -} - -static PyMethodDef __pyx_methods__memoryviewslice[] = { - {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0}, - {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0}, - {0, 0, 0, 0} -}; - -static struct PyGetSetDef 
__pyx_getsets__memoryviewslice[] = { - {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, - {0, 0, 0, 0, 0} -}; - -static PyTypeObject __pyx_type___pyx_memoryviewslice = { - PyVarObject_HEAD_INIT(0, 0) - "monotonic_align.core._memoryviewslice", /*tp_name*/ - sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ - 0, /*tp_itemsize*/ - __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ - #if PY_VERSION_HEX < 0x030800b4 - 0, /*tp_print*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 - 0, /*tp_vectorcall_offset*/ - #endif - 0, /*tp_getattr*/ - 0, /*tp_setattr*/ - #if PY_MAJOR_VERSION < 3 - 0, /*tp_compare*/ - #endif - #if PY_MAJOR_VERSION >= 3 - 0, /*tp_as_async*/ - #endif - #if CYTHON_COMPILING_IN_PYPY - __pyx_memoryview___repr__, /*tp_repr*/ - #else - 0, /*tp_repr*/ - #endif - 0, /*tp_as_number*/ - 0, /*tp_as_sequence*/ - 0, /*tp_as_mapping*/ - 0, /*tp_hash*/ - 0, /*tp_call*/ - #if CYTHON_COMPILING_IN_PYPY - __pyx_memoryview___str__, /*tp_str*/ - #else - 0, /*tp_str*/ - #endif - 0, /*tp_getattro*/ - 0, /*tp_setattro*/ - 0, /*tp_as_buffer*/ - Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ - "Internal class for passing memoryview slices to Python", /*tp_doc*/ - __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ - __pyx_tp_clear__memoryviewslice, /*tp_clear*/ - 0, /*tp_richcompare*/ - 0, /*tp_weaklistoffset*/ - 0, /*tp_iter*/ - 0, /*tp_iternext*/ - __pyx_methods__memoryviewslice, /*tp_methods*/ - 0, /*tp_members*/ - __pyx_getsets__memoryviewslice, /*tp_getset*/ - 0, /*tp_base*/ - 0, /*tp_dict*/ - 0, /*tp_descr_get*/ - 0, /*tp_descr_set*/ - 0, /*tp_dictoffset*/ - 0, /*tp_init*/ - 0, /*tp_alloc*/ - __pyx_tp_new__memoryviewslice, /*tp_new*/ - 0, /*tp_free*/ - 0, /*tp_is_gc*/ - 0, /*tp_bases*/ - 0, /*tp_mro*/ - 0, /*tp_cache*/ - 0, /*tp_subclasses*/ - 0, /*tp_weaklist*/ - 0, /*tp_del*/ - 0, /*tp_version_tag*/ - #if PY_VERSION_HEX >= 0x030400a1 - 0, /*tp_finalize*/ - #endif - #if PY_VERSION_HEX >= 0x030800b1 - 0, /*tp_vectorcall*/ - #endif - #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 - 0, /*tp_print*/ - #endif -}; - -static PyMethodDef __pyx_methods[] = { - {"maximum_path_c", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15monotonic_align_4core_1maximum_path_c, METH_VARARGS|METH_KEYWORDS, 0}, - {0, 0, 0, 0} -}; - -#if PY_MAJOR_VERSION >= 3 -#if CYTHON_PEP489_MULTI_PHASE_INIT -static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ -static int __pyx_pymod_exec_core(PyObject* module); /*proto*/ -static PyModuleDef_Slot __pyx_moduledef_slots[] = { - {Py_mod_create, (void*)__pyx_pymod_create}, - {Py_mod_exec, (void*)__pyx_pymod_exec_core}, - {0, NULL} -}; -#endif - -static struct PyModuleDef __pyx_moduledef = { - PyModuleDef_HEAD_INIT, - "core", - 0, /* m_doc */ - #if CYTHON_PEP489_MULTI_PHASE_INIT - 0, /* m_size */ - #else - -1, /* m_size */ - #endif - __pyx_methods /* m_methods */, - #if CYTHON_PEP489_MULTI_PHASE_INIT - __pyx_moduledef_slots, /* m_slots */ - #else - NULL, /* m_reload */ - #endif - NULL, /* m_traverse */ - NULL, /* m_clear */ - NULL /* m_free */ -}; -#endif -#ifndef CYTHON_SMALL_CODE -#if defined(__clang__) - #define CYTHON_SMALL_CODE -#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) - #define CYTHON_SMALL_CODE __attribute__((cold)) -#else - #define CYTHON_SMALL_CODE -#endif -#endif - -static __Pyx_StringTabEntry __pyx_string_tab[] = { - {&__pyx_n_s_ASCII, __pyx_k_ASCII, 
sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, - {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, - {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, - {&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0}, - {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0}, - {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, - {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, - {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, - {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0}, - {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, - {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, - {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, - {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, - {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, - {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, - {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, - {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, - {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, - {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, - {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, - {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, - {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, - {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, - {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, - {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, - {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, - {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, - {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, - {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, - {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, - {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, - {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 
1, 1}, - {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, - {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, - {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, - {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, - {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, - {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, - {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, - {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, - {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, - {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, - {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, - {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, - {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, - {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, - {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, - {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, - {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, - {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, - {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, - {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, - {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, - {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, - {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, - {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, - {&__pyx_n_s_paths, __pyx_k_paths, sizeof(__pyx_k_paths), 0, 0, 1, 1}, - {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, - {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, - {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, - {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, - {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, - {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, - {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, - {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, - {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, - {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 
1}, - {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, - {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, - {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, - {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, - {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, - {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, - {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, - {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, - {&__pyx_n_s_t_xs, __pyx_k_t_xs, sizeof(__pyx_k_t_xs), 0, 0, 1, 1}, - {&__pyx_n_s_t_ys, __pyx_k_t_ys, sizeof(__pyx_k_t_ys), 0, 0, 1, 1}, - {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, - {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, - {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, - {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, - {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, - {&__pyx_n_s_values, __pyx_k_values, sizeof(__pyx_k_values), 0, 0, 1, 1}, - {0, 0, 0, 0, 0, 0, 0} -}; -static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { - __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 15, __pyx_L1_error) - __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 133, __pyx_L1_error) - __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 148, __pyx_L1_error) - __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 151, __pyx_L1_error) - __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error) - __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 404, __pyx_L1_error) - __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 613, __pyx_L1_error) - __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 832, __pyx_L1_error) - return 0; - __pyx_L1_error:; - return -1; -} - -static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); - - /* "View.MemoryView":133 - * - * if not self.ndim: - * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< - * - * if itemsize <= 0: - */ - __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 133, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__2); - __Pyx_GIVEREF(__pyx_tuple__2); - - /* "View.MemoryView":136 - * - * if itemsize <= 0: - * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< - * - * if not isinstance(format, bytes): - */ - __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 136, __pyx_L1_error) - 
__Pyx_GOTREF(__pyx_tuple__3); - __Pyx_GIVEREF(__pyx_tuple__3); - - /* "View.MemoryView":148 - * - * if not self._shape: - * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< - * - * - */ - __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 148, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__4); - __Pyx_GIVEREF(__pyx_tuple__4); - - /* "View.MemoryView":176 - * self.data = <char *>malloc(self.len) - * if not self.data: - * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< - * - * if self.dtype_is_object: - */ - __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 176, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__5); - __Pyx_GIVEREF(__pyx_tuple__5); - - /* "View.MemoryView":192 - * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS - * if not (flags & bufmode): - * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< - * info.buf = self.data - * info.len = self.len - */ - __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 192, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__6); - __Pyx_GIVEREF(__pyx_tuple__6); - - /* "(tree fragment)":2 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 2, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__7); - __Pyx_GIVEREF(__pyx_tuple__7); - - /* "(tree fragment)":4 - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - */ - __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 4, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__8); - __Pyx_GIVEREF(__pyx_tuple__8); - - /* "View.MemoryView":418 - * def __setitem__(memoryview self, object index, object value): - * if self.view.readonly: - * raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<< - * - * have_slices, index = _unellipsify(index, self.view.ndim) - */ - __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 418, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__9); - __Pyx_GIVEREF(__pyx_tuple__9); - - /* "View.MemoryView":495 - * result = struct.unpack(self.view.format, bytesitem) - * except struct.error: - * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< - * else: - * if len(self.view.format) == 1: - */ - __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 495, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__10); - __Pyx_GIVEREF(__pyx_tuple__10); - - /* "View.MemoryView":520 - * def __getbuffer__(self, Py_buffer *info, int flags): - * if flags & PyBUF_WRITABLE and self.view.readonly: - * raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<< - * - * if flags & PyBUF_ND: - */ - __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if
(unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 520, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__11); - __Pyx_GIVEREF(__pyx_tuple__11); - - /* "View.MemoryView":570 - * if self.view.strides == NULL: - * - * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< - * - * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) - */ - __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 570, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__12); - __Pyx_GIVEREF(__pyx_tuple__12); - - /* "View.MemoryView":577 - * def suboffsets(self): - * if self.view.suboffsets == NULL: - * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< - * - * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) - */ - __pyx_tuple__13 = PyTuple_New(1); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 577, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__13); - __Pyx_INCREF(__pyx_int_neg_1); - __Pyx_GIVEREF(__pyx_int_neg_1); - PyTuple_SET_ITEM(__pyx_tuple__13, 0, __pyx_int_neg_1); - __Pyx_GIVEREF(__pyx_tuple__13); - - /* "(tree fragment)":2 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 2, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__14); - __Pyx_GIVEREF(__pyx_tuple__14); - - /* "(tree fragment)":4 - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - */ - __pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(1, 4, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__15); - __Pyx_GIVEREF(__pyx_tuple__15); - - /* "View.MemoryView":682 - * if item is Ellipsis: - * if not seen_ellipsis: - * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< - * seen_ellipsis = True - * else: - */ - __pyx_slice__16 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__16)) __PYX_ERR(1, 682, __pyx_L1_error) - __Pyx_GOTREF(__pyx_slice__16); - __Pyx_GIVEREF(__pyx_slice__16); - - /* "View.MemoryView":703 - * for suboffset in suboffsets[:ndim]: - * if suboffset >= 0: - * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< - * - * - */ - __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 703, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__17); - __Pyx_GIVEREF(__pyx_tuple__17); - - /* "(tree fragment)":2 - * def __reduce_cython__(self): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - */ - __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 2, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__18); - __Pyx_GIVEREF(__pyx_tuple__18); - - /* "(tree fragment)":4 - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") - * def __setstate_cython__(self, __pyx_state): - * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # 
<<<<<<<<<<<<<< - */ - __pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(1, 4, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__19); - __Pyx_GIVEREF(__pyx_tuple__19); - - /* "View.MemoryView":286 - * return self.name - * - * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< - * cdef strided = Enum("<strided and direct>") # default - * cdef indirect = Enum("<strided and indirect>") - */ - __pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(1, 286, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__20); - __Pyx_GIVEREF(__pyx_tuple__20); - - /* "View.MemoryView":287 - * - * cdef generic = Enum("<strided and direct or indirect>") - * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< - * cdef indirect = Enum("<strided and indirect>") - * - */ - __pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(1, 287, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__21); - __Pyx_GIVEREF(__pyx_tuple__21); - - /* "View.MemoryView":288 - * cdef generic = Enum("<strided and direct or indirect>") - * cdef strided = Enum("<strided and direct>") # default - * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< - * - * - */ - __pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 288, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__22); - __Pyx_GIVEREF(__pyx_tuple__22); - - /* "View.MemoryView":291 - * - * - * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< - * cdef indirect_contiguous = Enum("<contiguous and indirect>") - * - */ - __pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 291, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__23); - __Pyx_GIVEREF(__pyx_tuple__23); - - /* "View.MemoryView":292 - * - * cdef contiguous = Enum("<contiguous and direct>") - * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< - * - * - */ - __pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 292, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__24); - __Pyx_GIVEREF(__pyx_tuple__24); - - /* "(tree fragment)":1 - * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< - * cdef object __pyx_PickleError - * cdef object __pyx_result - */ - __pyx_tuple__25 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 1, __pyx_L1_error) - __Pyx_GOTREF(__pyx_tuple__25); - __Pyx_GIVEREF(__pyx_tuple__25); - __pyx_codeobj__26 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__25, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__26)) __PYX_ERR(1, 1, __pyx_L1_error) - __Pyx_RefNannyFinishContext(); - return 0; - __pyx_L1_error:; - __Pyx_RefNannyFinishContext(); - return -1; -} - -static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { - /* InitThreads.init */ - #ifdef WITH_THREAD -PyEval_InitThreads(); -#endif - -if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) - - if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_int_neg_1 =
PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) - return 0; - __pyx_L1_error:; - return -1; -} - -static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ -static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ - -static int __Pyx_modinit_global_init_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); - /*--- Global init code ---*/ - generic = Py_None; Py_INCREF(Py_None); - strided = Py_None; Py_INCREF(Py_None); - indirect = Py_None; Py_INCREF(Py_None); - contiguous = Py_None; Py_INCREF(Py_None); - indirect_contiguous = Py_None; Py_INCREF(Py_None); - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_variable_export_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); - /*--- Variable export code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_function_export_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); - /*--- Function export code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_type_init_code(void) { - __Pyx_RefNannyDeclarations - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); - /*--- Type init code ---*/ - __pyx_vtabptr_array = &__pyx_vtable_array; - __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; - if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) - #if PY_VERSION_HEX < 0x030800B1 - __pyx_type___pyx_array.tp_print = 0; - #endif - if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) - if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 105, __pyx_L1_error) - __pyx_array_type = &__pyx_type___pyx_array; - if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) - #if PY_VERSION_HEX < 0x030800B1 - __pyx_type___pyx_MemviewEnum.tp_print = 0; - #endif - if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) { - __pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr; - } - if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 279, __pyx_L1_error) - __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; - __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; - __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; - __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; - __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; - 
__pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; - __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; - __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; - __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; - if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) - #if PY_VERSION_HEX < 0x030800B1 - __pyx_type___pyx_memoryview.tp_print = 0; - #endif - if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) { - __pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr; - } - if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) - if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 330, __pyx_L1_error) - __pyx_memoryview_type = &__pyx_type___pyx_memoryview; - __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; - __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; - __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; - __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; - __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; - if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) - #if PY_VERSION_HEX < 0x030800B1 - __pyx_type___pyx_memoryviewslice.tp_print = 0; - #endif - if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) { - __pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr; - } - if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) - if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 965, __pyx_L1_error) - __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; - __Pyx_RefNannyFinishContext(); - return 0; - __pyx_L1_error:; - __Pyx_RefNannyFinishContext(); - return -1; -} - -static int __Pyx_modinit_type_import_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); - /*--- Type import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_variable_import_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); - /*--- Variable import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - -static int __Pyx_modinit_function_import_code(void) { - __Pyx_RefNannyDeclarations - __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); - /*--- Function import code ---*/ - __Pyx_RefNannyFinishContext(); - return 0; -} - - -#ifndef 
CYTHON_NO_PYINIT_EXPORT -#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC -#elif PY_MAJOR_VERSION < 3 -#ifdef __cplusplus -#define __Pyx_PyMODINIT_FUNC extern "C" void -#else -#define __Pyx_PyMODINIT_FUNC void -#endif -#else -#ifdef __cplusplus -#define __Pyx_PyMODINIT_FUNC extern "C" PyObject * -#else -#define __Pyx_PyMODINIT_FUNC PyObject * -#endif -#endif - - -#if PY_MAJOR_VERSION < 3 -__Pyx_PyMODINIT_FUNC initcore(void) CYTHON_SMALL_CODE; /*proto*/ -__Pyx_PyMODINIT_FUNC initcore(void) -#else -__Pyx_PyMODINIT_FUNC PyInit_core(void) CYTHON_SMALL_CODE; /*proto*/ -__Pyx_PyMODINIT_FUNC PyInit_core(void) -#if CYTHON_PEP489_MULTI_PHASE_INIT -{ - return PyModuleDef_Init(&__pyx_moduledef); -} -static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { - #if PY_VERSION_HEX >= 0x030700A1 - static PY_INT64_T main_interpreter_id = -1; - PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); - if (main_interpreter_id == -1) { - main_interpreter_id = current_id; - return (unlikely(current_id == -1)) ? -1 : 0; - } else if (unlikely(main_interpreter_id != current_id)) - #else - static PyInterpreterState *main_interpreter = NULL; - PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; - if (!main_interpreter) { - main_interpreter = current_interpreter; - } else if (unlikely(main_interpreter != current_interpreter)) - #endif - { - PyErr_SetString( - PyExc_ImportError, - "Interpreter change detected - this module can only be loaded into one interpreter per process."); - return -1; - } - return 0; -} -static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { - PyObject *value = PyObject_GetAttrString(spec, from_name); - int result = 0; - if (likely(value)) { - if (allow_none || value != Py_None) { - result = PyDict_SetItemString(moddict, to_name, value); - } - Py_DECREF(value); - } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { - PyErr_Clear(); - } else { - result = -1; - } - return result; -} -static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { - PyObject *module = NULL, *moddict, *modname; - if (__Pyx_check_single_interpreter()) - return NULL; - if (__pyx_m) - return __Pyx_NewRef(__pyx_m); - modname = PyObject_GetAttrString(spec, "name"); - if (unlikely(!modname)) goto bad; - module = PyModule_NewObject(modname); - Py_DECREF(modname); - if (unlikely(!module)) goto bad; - moddict = PyModule_GetDict(module); - if (unlikely(!moddict)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; - if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; - return module; -bad: - Py_XDECREF(module); - return NULL; -} - - -static CYTHON_SMALL_CODE int __pyx_pymod_exec_core(PyObject *__pyx_pyinit_module) -#endif -#endif -{ - PyObject *__pyx_t_1 = NULL; - static PyThread_type_lock __pyx_t_2[8]; - int __pyx_lineno = 0; - const char *__pyx_filename = NULL; - int __pyx_clineno = 0; - __Pyx_RefNannyDeclarations - #if CYTHON_PEP489_MULTI_PHASE_INIT - if (__pyx_m) { - if (__pyx_m == __pyx_pyinit_module) return 0; - PyErr_SetString(PyExc_RuntimeError, "Module 'core' has already been imported. 
Re-initialisation is not supported."); - return -1; - } - #elif PY_MAJOR_VERSION >= 3 - if (__pyx_m) return __Pyx_NewRef(__pyx_m); - #endif - #if CYTHON_REFNANNY -__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); -if (!__Pyx_RefNanny) { - PyErr_Clear(); - __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); - if (!__Pyx_RefNanny) - Py_FatalError("failed to import 'refnanny' module"); -} -#endif - __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_core(void)", 0); - if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #ifdef __Pxy_PyFrame_Initialize_Offsets - __Pxy_PyFrame_Initialize_Offsets(); - #endif - __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) - __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) - #ifdef __Pyx_CyFunction_USED - if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_FusedFunction_USED - if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_Coroutine_USED - if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_Generator_USED - if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_AsyncGen_USED - if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - #ifdef __Pyx_StopAsyncIteration_USED - if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - /*--- Library function declarations ---*/ - /*--- Threads initialization code ---*/ - #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS - #ifdef WITH_THREAD /* Python build with threading support? */ - PyEval_InitThreads(); - #endif - #endif - /*--- Module creation code ---*/ - #if CYTHON_PEP489_MULTI_PHASE_INIT - __pyx_m = __pyx_pyinit_module; - Py_INCREF(__pyx_m); - #else - #if PY_MAJOR_VERSION < 3 - __pyx_m = Py_InitModule4("core", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); - #else - __pyx_m = PyModule_Create(&__pyx_moduledef); - #endif - if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_d); - __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_b); - __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) - Py_INCREF(__pyx_cython_runtime); - if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); - /*--- Initialize various global constants etc. 
---*/ - if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) - if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - if (__pyx_module_is_main_monotonic_align__core) { - if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - } - #if PY_MAJOR_VERSION >= 3 - { - PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) - if (!PyDict_GetItemString(modules, "monotonic_align.core")) { - if (unlikely(PyDict_SetItemString(modules, "monotonic_align.core", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) - } - } - #endif - /*--- Builtin init code ---*/ - if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - /*--- Constants init code ---*/ - if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - /*--- Global type/function init code ---*/ - (void)__Pyx_modinit_global_init_code(); - (void)__Pyx_modinit_variable_export_code(); - (void)__Pyx_modinit_function_export_code(); - if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error) - (void)__Pyx_modinit_type_import_code(); - (void)__Pyx_modinit_variable_import_code(); - (void)__Pyx_modinit_function_import_code(); - /*--- Execution code ---*/ - #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) - if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) - #endif - - /* "monotonic_align/core.pyx":7 - * @cython.boundscheck(False) - * @cython.wraparound(False) - * cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: # <<<<<<<<<<<<<< - * cdef int x - * cdef int y - */ - __pyx_k_ = (-1e9); - - /* "monotonic_align/core.pyx":1 - * cimport cython # <<<<<<<<<<<<<< - * from cython.parallel import prange - * - */ - __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "View.MemoryView":209 - * info.obj = self - * - * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< - * - * def __dealloc__(array self): - */ - __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 209, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 209, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - PyType_Modified(__pyx_array_type); - - /* "View.MemoryView":286 - * return self.name - * - * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< - * cdef strided = Enum("<strided and direct>") # default - * cdef indirect = Enum("<strided and indirect>") - */ - __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 286, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_XGOTREF(generic); - __Pyx_DECREF_SET(generic, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - - /* "View.MemoryView":287 - * - * cdef generic = Enum("<strided and direct or indirect>") - * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< - * cdef indirect = Enum("<strided and indirect>") - * - */ - __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__21, NULL); if
(unlikely(!__pyx_t_1)) __PYX_ERR(1, 287, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_XGOTREF(strided); - __Pyx_DECREF_SET(strided, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - - /* "View.MemoryView":288 - * cdef generic = Enum("<strided and direct or indirect>") - * cdef strided = Enum("<strided and direct>") # default - * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 288, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_XGOTREF(indirect); - __Pyx_DECREF_SET(indirect, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - - /* "View.MemoryView":291 - * - * - * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< - * cdef indirect_contiguous = Enum("<contiguous and indirect>") - * - */ - __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 291, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_XGOTREF(contiguous); - __Pyx_DECREF_SET(contiguous, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - - /* "View.MemoryView":292 - * - * cdef contiguous = Enum("<contiguous and direct>") - * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 292, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - __Pyx_XGOTREF(indirect_contiguous); - __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1); - __Pyx_GIVEREF(__pyx_t_1); - __pyx_t_1 = 0; - - /* "View.MemoryView":316 - * - * DEF THREAD_LOCKS_PREALLOCATED = 8 - * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< - * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ - * PyThread_allocate_lock(), - */ - __pyx_memoryview_thread_locks_used = 0; - - /* "View.MemoryView":317 - * DEF THREAD_LOCKS_PREALLOCATED = 8 - * cdef int __pyx_memoryview_thread_locks_used = 0 - * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< - * PyThread_allocate_lock(), - * PyThread_allocate_lock(), - */ - __pyx_t_2[0] = PyThread_allocate_lock(); - __pyx_t_2[1] = PyThread_allocate_lock(); - __pyx_t_2[2] = PyThread_allocate_lock(); - __pyx_t_2[3] = PyThread_allocate_lock(); - __pyx_t_2[4] = PyThread_allocate_lock(); - __pyx_t_2[5] = PyThread_allocate_lock(); - __pyx_t_2[6] = PyThread_allocate_lock(); - __pyx_t_2[7] = PyThread_allocate_lock(); - memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_2, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); - - /* "View.MemoryView":549 - * info.obj = self - * - * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 549, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 549, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - PyType_Modified(__pyx_memoryview_type); - - /* "View.MemoryView":995 - * return self.from_object - * - * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< - * - * - */ - __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 995, __pyx_L1_error) -
__Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 995, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - PyType_Modified(__pyx_memoryviewslice_type); - - /* "(tree fragment)":1 - * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< - * cdef object __pyx_PickleError - * cdef object __pyx_result - */ - __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error) - __Pyx_GOTREF(__pyx_t_1); - if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error) - __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; - - /* "(tree fragment)":11 - * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) - * return __pyx_result - * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< - * __pyx_result.name = __pyx_state[0] - * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): - */ - - /*--- Wrapped vars code ---*/ - - goto __pyx_L0; - __pyx_L1_error:; - __Pyx_XDECREF(__pyx_t_1); - if (__pyx_m) { - if (__pyx_d) { - __Pyx_AddTraceback("init monotonic_align.core", __pyx_clineno, __pyx_lineno, __pyx_filename); - } - Py_CLEAR(__pyx_m); - } else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_ImportError, "init monotonic_align.core"); - } - __pyx_L0:; - __Pyx_RefNannyFinishContext(); - #if CYTHON_PEP489_MULTI_PHASE_INIT - return (__pyx_m != NULL) ? 0 : -1; - #elif PY_MAJOR_VERSION >= 3 - return __pyx_m; - #else - return; - #endif -} - -/* --- Runtime support code --- */ -/* Refnanny */ -#if CYTHON_REFNANNY -static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { - PyObject *m = NULL, *p = NULL; - void *r = NULL; - m = PyImport_ImportModule(modname); - if (!m) goto end; - p = PyObject_GetAttrString(m, "RefNannyAPI"); - if (!p) goto end; - r = PyLong_AsVoidPtr(p); -end: - Py_XDECREF(p); - Py_XDECREF(m); - return (__Pyx_RefNannyAPIStruct *)r; -} -#endif - -/* PyObjectGetAttrStr */ -#if CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { - PyTypeObject* tp = Py_TYPE(obj); - if (likely(tp->tp_getattro)) - return tp->tp_getattro(obj, attr_name); -#if PY_MAJOR_VERSION < 3 - if (likely(tp->tp_getattr)) - return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); -#endif - return PyObject_GetAttr(obj, attr_name); -} -#endif - -/* GetBuiltinName */ -static PyObject *__Pyx_GetBuiltinName(PyObject *name) { - PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); - if (unlikely(!result)) { - PyErr_Format(PyExc_NameError, -#if PY_MAJOR_VERSION >= 3 - "name '%U' is not defined", name); -#else - "name '%.200s' is not defined", PyString_AS_STRING(name)); -#endif - } - return result; -} - -/* MemviewSliceInit */ -static int -__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, - int ndim, - __Pyx_memviewslice *memviewslice, - int memview_is_new_reference) -{ - __Pyx_RefNannyDeclarations - int i, retval=-1; - Py_buffer *buf = &memview->view; - __Pyx_RefNannySetupContext("init_memviewslice", 0); - if (unlikely(memviewslice->memview || memviewslice->data)) { - PyErr_SetString(PyExc_ValueError, - "memviewslice is already initialized!"); - goto fail; - } - if (buf->strides) { - for (i = 0; i < ndim; i++) { - memviewslice->strides[i] = buf->strides[i]; - } - } else { - Py_ssize_t stride =
buf->itemsize; - for (i = ndim - 1; i >= 0; i--) { - memviewslice->strides[i] = stride; - stride *= buf->shape[i]; - } - } - for (i = 0; i < ndim; i++) { - memviewslice->shape[i] = buf->shape[i]; - if (buf->suboffsets) { - memviewslice->suboffsets[i] = buf->suboffsets[i]; - } else { - memviewslice->suboffsets[i] = -1; - } - } - memviewslice->memview = memview; - memviewslice->data = (char *)buf->buf; - if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { - Py_INCREF(memview); - } - retval = 0; - goto no_fail; -fail: - memviewslice->memview = 0; - memviewslice->data = 0; - retval = -1; -no_fail: - __Pyx_RefNannyFinishContext(); - return retval; -} -#ifndef Py_NO_RETURN -#define Py_NO_RETURN -#endif -static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN { - va_list vargs; - char msg[200]; -#ifdef HAVE_STDARG_PROTOTYPES - va_start(vargs, fmt); -#else - va_start(vargs); -#endif - vsnprintf(msg, 200, fmt, vargs); - va_end(vargs); - Py_FatalError(msg); -} -static CYTHON_INLINE int -__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, - PyThread_type_lock lock) -{ - int result; - PyThread_acquire_lock(lock, 1); - result = (*acquisition_count)++; - PyThread_release_lock(lock); - return result; -} -static CYTHON_INLINE int -__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, - PyThread_type_lock lock) -{ - int result; - PyThread_acquire_lock(lock, 1); - result = (*acquisition_count)--; - PyThread_release_lock(lock); - return result; -} -static CYTHON_INLINE void -__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) -{ - int first_time; - struct __pyx_memoryview_obj *memview = memslice->memview; - if (unlikely(!memview || (PyObject *) memview == Py_None)) - return; - if (unlikely(__pyx_get_slice_count(memview) < 0)) - __pyx_fatalerror("Acquisition count is %d (line %d)", - __pyx_get_slice_count(memview), lineno); - first_time = __pyx_add_acquisition_count(memview) == 0; - if (unlikely(first_time)) { - if (have_gil) { - Py_INCREF((PyObject *) memview); - } else { - PyGILState_STATE _gilstate = PyGILState_Ensure(); - Py_INCREF((PyObject *) memview); - PyGILState_Release(_gilstate); - } - } -} -static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, - int have_gil, int lineno) { - int last_time; - struct __pyx_memoryview_obj *memview = memslice->memview; - if (unlikely(!memview || (PyObject *) memview == Py_None)) { - memslice->memview = NULL; - return; - } - if (unlikely(__pyx_get_slice_count(memview) <= 0)) - __pyx_fatalerror("Acquisition count is %d (line %d)", - __pyx_get_slice_count(memview), lineno); - last_time = __pyx_sub_acquisition_count(memview) == 1; - memslice->data = NULL; - if (unlikely(last_time)) { - if (have_gil) { - Py_CLEAR(memslice->memview); - } else { - PyGILState_STATE _gilstate = PyGILState_Ensure(); - Py_CLEAR(memslice->memview); - PyGILState_Release(_gilstate); - } - } else { - memslice->memview = NULL; - } -} - -/* RaiseArgTupleInvalid */ -static void __Pyx_RaiseArgtupleInvalid( - const char* func_name, - int exact, - Py_ssize_t num_min, - Py_ssize_t num_max, - Py_ssize_t num_found) -{ - Py_ssize_t num_expected; - const char *more_or_less; - if (num_found < num_min) { - num_expected = num_min; - more_or_less = "at least"; - } else { - num_expected = num_max; - more_or_less = "at most"; - } - if (exact) { - more_or_less = "exactly"; - } - PyErr_Format(PyExc_TypeError, - "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" 
CYTHON_FORMAT_SSIZE_T "d given)", - func_name, more_or_less, num_expected, - (num_expected == 1) ? "" : "s", num_found); -} - -/* RaiseDoubleKeywords */ -static void __Pyx_RaiseDoubleKeywordsError( - const char* func_name, - PyObject* kw_name) -{ - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION >= 3 - "%s() got multiple values for keyword argument '%U'", func_name, kw_name); - #else - "%s() got multiple values for keyword argument '%s'", func_name, - PyString_AsString(kw_name)); - #endif -} - -/* ParseKeywords */ -static int __Pyx_ParseOptionalKeywords( - PyObject *kwds, - PyObject **argnames[], - PyObject *kwds2, - PyObject *values[], - Py_ssize_t num_pos_args, - const char* function_name) -{ - PyObject *key = 0, *value = 0; - Py_ssize_t pos = 0; - PyObject*** name; - PyObject*** first_kw_arg = argnames + num_pos_args; - while (PyDict_Next(kwds, &pos, &key, &value)) { - name = first_kw_arg; - while (*name && (**name != key)) name++; - if (*name) { - values[name-argnames] = value; - continue; - } - name = first_kw_arg; - #if PY_MAJOR_VERSION < 3 - if (likely(PyString_Check(key))) { - while (*name) { - if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) - && _PyString_Eq(**name, key)) { - values[name-argnames] = value; - break; - } - name++; - } - if (*name) continue; - else { - PyObject*** argname = argnames; - while (argname != first_kw_arg) { - if ((**argname == key) || ( - (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) - && _PyString_Eq(**argname, key))) { - goto arg_passed_twice; - } - argname++; - } - } - } else - #endif - if (likely(PyUnicode_Check(key))) { - while (*name) { - int cmp = (**name == key) ? 0 : - #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 - (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : - #endif - PyUnicode_Compare(**name, key); - if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; - if (cmp == 0) { - values[name-argnames] = value; - break; - } - name++; - } - if (*name) continue; - else { - PyObject*** argname = argnames; - while (argname != first_kw_arg) { - int cmp = (**argname == key) ? 0 : - #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 - (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : - #endif - PyUnicode_Compare(**argname, key); - if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; - if (cmp == 0) goto arg_passed_twice; - argname++; - } - } - } else - goto invalid_keyword_type; - if (kwds2) { - if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; - } else { - goto invalid_keyword; - } - } - return 0; -arg_passed_twice: - __Pyx_RaiseDoubleKeywordsError(function_name, key); - goto bad; -invalid_keyword_type: - PyErr_Format(PyExc_TypeError, - "%.200s() keywords must be strings", function_name); - goto bad; -invalid_keyword: - PyErr_Format(PyExc_TypeError, - #if PY_MAJOR_VERSION < 3 - "%.200s() got an unexpected keyword argument '%.200s'", - function_name, PyString_AsString(key)); - #else - "%s() got an unexpected keyword argument '%U'", - function_name, key); - #endif -bad: - return -1; -} - -/* None */ -static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { - PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); -} - -/* ArgTypeTest */ -static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) -{ - if (unlikely(!type)) { - PyErr_SetString(PyExc_SystemError, "Missing type object"); - return 0; - } - else if (exact) { - #if PY_MAJOR_VERSION == 2 - if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; - #endif - } - else { - if (likely(__Pyx_TypeCheck(obj, type))) return 1; - } - PyErr_Format(PyExc_TypeError, - "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", - name, type->tp_name, Py_TYPE(obj)->tp_name); - return 0; -} - -/* PyObjectCall */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { - PyObject *result; - ternaryfunc call = func->ob_type->tp_call; - if (unlikely(!call)) - return PyObject_Call(func, arg, kw); - if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) - return NULL; - result = (*call)(func, arg, kw); - Py_LeaveRecursiveCall(); - if (unlikely(!result) && unlikely(!PyErr_Occurred())) { - PyErr_SetString( - PyExc_SystemError, - "NULL result without error in PyObject_Call"); - } - return result; -} -#endif - -/* PyErrFetchRestore */ -#if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - tmp_type = tstate->curexc_type; - tmp_value = tstate->curexc_value; - tmp_tb = tstate->curexc_traceback; - tstate->curexc_type = type; - tstate->curexc_value = value; - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -} -static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { - *type = tstate->curexc_type; - *value = tstate->curexc_value; - *tb = tstate->curexc_traceback; - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; -} -#endif - -/* RaiseException */ -#if PY_MAJOR_VERSION < 3 -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, - CYTHON_UNUSED PyObject *cause) { - __Pyx_PyThreadState_declare - Py_XINCREF(type); - if (!value || value == Py_None) - value = NULL; - else - Py_INCREF(value); - if (!tb || tb == Py_None) - tb = NULL; - else { - Py_INCREF(tb); - if (!PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto raise_error; 
- } - } - if (PyType_Check(type)) { -#if CYTHON_COMPILING_IN_PYPY - if (!value) { - Py_INCREF(Py_None); - value = Py_None; - } -#endif - PyErr_NormalizeException(&type, &value, &tb); - } else { - if (value) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto raise_error; - } - value = type; - type = (PyObject*) Py_TYPE(type); - Py_INCREF(type); - if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto raise_error; - } - } - __Pyx_PyThreadState_assign - __Pyx_ErrRestore(type, value, tb); - return; -raise_error: - Py_XDECREF(value); - Py_XDECREF(type); - Py_XDECREF(tb); - return; -} -#else -static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { - PyObject* owned_instance = NULL; - if (tb == Py_None) { - tb = 0; - } else if (tb && !PyTraceBack_Check(tb)) { - PyErr_SetString(PyExc_TypeError, - "raise: arg 3 must be a traceback or None"); - goto bad; - } - if (value == Py_None) - value = 0; - if (PyExceptionInstance_Check(type)) { - if (value) { - PyErr_SetString(PyExc_TypeError, - "instance exception may not have a separate value"); - goto bad; - } - value = type; - type = (PyObject*) Py_TYPE(value); - } else if (PyExceptionClass_Check(type)) { - PyObject *instance_class = NULL; - if (value && PyExceptionInstance_Check(value)) { - instance_class = (PyObject*) Py_TYPE(value); - if (instance_class != type) { - int is_subclass = PyObject_IsSubclass(instance_class, type); - if (!is_subclass) { - instance_class = NULL; - } else if (unlikely(is_subclass == -1)) { - goto bad; - } else { - type = instance_class; - } - } - } - if (!instance_class) { - PyObject *args; - if (!value) - args = PyTuple_New(0); - else if (PyTuple_Check(value)) { - Py_INCREF(value); - args = value; - } else - args = PyTuple_Pack(1, value); - if (!args) - goto bad; - owned_instance = PyObject_Call(type, args, NULL); - Py_DECREF(args); - if (!owned_instance) - goto bad; - value = owned_instance; - if (!PyExceptionInstance_Check(value)) { - PyErr_Format(PyExc_TypeError, - "calling %R should have returned an instance of " - "BaseException, not %R", - type, Py_TYPE(value)); - goto bad; - } - } - } else { - PyErr_SetString(PyExc_TypeError, - "raise: exception class must be a subclass of BaseException"); - goto bad; - } - if (cause) { - PyObject *fixed_cause; - if (cause == Py_None) { - fixed_cause = NULL; - } else if (PyExceptionClass_Check(cause)) { - fixed_cause = PyObject_CallObject(cause, NULL); - if (fixed_cause == NULL) - goto bad; - } else if (PyExceptionInstance_Check(cause)) { - fixed_cause = cause; - Py_INCREF(fixed_cause); - } else { - PyErr_SetString(PyExc_TypeError, - "exception causes must derive from " - "BaseException"); - goto bad; - } - PyException_SetCause(value, fixed_cause); - } - PyErr_SetObject(type, value); - if (tb) { -#if CYTHON_COMPILING_IN_PYPY - PyObject *tmp_type, *tmp_value, *tmp_tb; - PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); - Py_INCREF(tb); - PyErr_Restore(tmp_type, tmp_value, tb); - Py_XDECREF(tmp_tb); -#else - PyThreadState *tstate = __Pyx_PyThreadState_Current; - PyObject* tmp_tb = tstate->curexc_traceback; - if (tb != tmp_tb) { - Py_INCREF(tb); - tstate->curexc_traceback = tb; - Py_XDECREF(tmp_tb); - } -#endif - } -bad: - Py_XDECREF(owned_instance); - return; -} -#endif - -/* PyCFunctionFastCall */ -#if CYTHON_FAST_PYCCALL -static CYTHON_INLINE PyObject * 
__Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { - PyCFunctionObject *func = (PyCFunctionObject*)func_obj; - PyCFunction meth = PyCFunction_GET_FUNCTION(func); - PyObject *self = PyCFunction_GET_SELF(func); - int flags = PyCFunction_GET_FLAGS(func); - assert(PyCFunction_Check(func)); - assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); - assert(nargs >= 0); - assert(nargs == 0 || args != NULL); - /* _PyCFunction_FastCallDict() must not be called with an exception set, - because it may clear it (directly or indirectly) and so the - caller loses its exception */ - assert(!PyErr_Occurred()); - if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { - return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); - } else { - return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); - } -} -#endif - -/* PyFunctionFastCall */ -#if CYTHON_FAST_PYCALL -static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, - PyObject *globals) { - PyFrameObject *f; - PyThreadState *tstate = __Pyx_PyThreadState_Current; - PyObject **fastlocals; - Py_ssize_t i; - PyObject *result; - assert(globals != NULL); - /* XXX Perhaps we should create a specialized - PyFrame_New() that doesn't take locals, but does - take builtins without sanity checking them. - */ - assert(tstate != NULL); - f = PyFrame_New(tstate, co, globals, NULL); - if (f == NULL) { - return NULL; - } - fastlocals = __Pyx_PyFrame_GetLocalsplus(f); - for (i = 0; i < na; i++) { - Py_INCREF(*args); - fastlocals[i] = *args++; - } - result = PyEval_EvalFrameEx(f,0); - ++tstate->recursion_depth; - Py_DECREF(f); - --tstate->recursion_depth; - return result; -} -#if 1 || PY_VERSION_HEX < 0x030600B1 -static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { - PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); - PyObject *globals = PyFunction_GET_GLOBALS(func); - PyObject *argdefs = PyFunction_GET_DEFAULTS(func); - PyObject *closure; -#if PY_MAJOR_VERSION >= 3 - PyObject *kwdefs; -#endif - PyObject *kwtuple, **k; - PyObject **d; - Py_ssize_t nd; - Py_ssize_t nk; - PyObject *result; - assert(kwargs == NULL || PyDict_Check(kwargs)); - nk = kwargs ? 
PyDict_Size(kwargs) : 0; - if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { - return NULL; - } - if ( -#if PY_MAJOR_VERSION >= 3 - co->co_kwonlyargcount == 0 && -#endif - likely(kwargs == NULL || nk == 0) && - co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { - if (argdefs == NULL && co->co_argcount == nargs) { - result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); - goto done; - } - else if (nargs == 0 && argdefs != NULL - && co->co_argcount == Py_SIZE(argdefs)) { - /* function called with no arguments, but all parameters have - a default value: use default values as arguments .*/ - args = &PyTuple_GET_ITEM(argdefs, 0); - result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); - goto done; - } - } - if (kwargs != NULL) { - Py_ssize_t pos, i; - kwtuple = PyTuple_New(2 * nk); - if (kwtuple == NULL) { - result = NULL; - goto done; - } - k = &PyTuple_GET_ITEM(kwtuple, 0); - pos = i = 0; - while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { - Py_INCREF(k[i]); - Py_INCREF(k[i+1]); - i += 2; - } - nk = i / 2; - } - else { - kwtuple = NULL; - k = NULL; - } - closure = PyFunction_GET_CLOSURE(func); -#if PY_MAJOR_VERSION >= 3 - kwdefs = PyFunction_GET_KW_DEFAULTS(func); -#endif - if (argdefs != NULL) { - d = &PyTuple_GET_ITEM(argdefs, 0); - nd = Py_SIZE(argdefs); - } - else { - d = NULL; - nd = 0; - } -#if PY_MAJOR_VERSION >= 3 - result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, - args, (int)nargs, - k, (int)nk, - d, (int)nd, kwdefs, closure); -#else - result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, - args, (int)nargs, - k, (int)nk, - d, (int)nd, closure); -#endif - Py_XDECREF(kwtuple); -done: - Py_LeaveRecursiveCall(); - return result; -} -#endif -#endif - -/* PyObjectCall2Args */ -static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { - PyObject *args, *result = NULL; - #if CYTHON_FAST_PYCALL - if (PyFunction_Check(function)) { - PyObject *args[2] = {arg1, arg2}; - return __Pyx_PyFunction_FastCall(function, args, 2); - } - #endif - #if CYTHON_FAST_PYCCALL - if (__Pyx_PyFastCFunction_Check(function)) { - PyObject *args[2] = {arg1, arg2}; - return __Pyx_PyCFunction_FastCall(function, args, 2); - } - #endif - args = PyTuple_New(2); - if (unlikely(!args)) goto done; - Py_INCREF(arg1); - PyTuple_SET_ITEM(args, 0, arg1); - Py_INCREF(arg2); - PyTuple_SET_ITEM(args, 1, arg2); - Py_INCREF(function); - result = __Pyx_PyObject_Call(function, args, NULL); - Py_DECREF(args); - Py_DECREF(function); -done: - return result; -} - -/* PyObjectCallMethO */ -#if CYTHON_COMPILING_IN_CPYTHON -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { - PyObject *self, *result; - PyCFunction cfunc; - cfunc = PyCFunction_GET_FUNCTION(func); - self = PyCFunction_GET_SELF(func); - if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) - return NULL; - result = cfunc(self, arg); - Py_LeaveRecursiveCall(); - if (unlikely(!result) && unlikely(!PyErr_Occurred())) { - PyErr_SetString( - PyExc_SystemError, - "NULL result without error in PyObject_Call"); - } - return result; -} -#endif - -/* PyObjectCallOneArg */ -#if CYTHON_COMPILING_IN_CPYTHON -static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { - PyObject *result; - PyObject *args = PyTuple_New(1); - if (unlikely(!args)) return NULL; - Py_INCREF(arg); - PyTuple_SET_ITEM(args, 0, arg); - result = __Pyx_PyObject_Call(func, args, NULL); - Py_DECREF(args); 
- return result; -} -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { -#if CYTHON_FAST_PYCALL - if (PyFunction_Check(func)) { - return __Pyx_PyFunction_FastCall(func, &arg, 1); - } -#endif - if (likely(PyCFunction_Check(func))) { - if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { - return __Pyx_PyObject_CallMethO(func, arg); -#if CYTHON_FAST_PYCCALL - } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { - return __Pyx_PyCFunction_FastCall(func, &arg, 1); -#endif - } - } - return __Pyx__PyObject_CallOneArg(func, arg); -} -#else -static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { - PyObject *result; - PyObject *args = PyTuple_Pack(1, arg); - if (unlikely(!args)) return NULL; - result = __Pyx_PyObject_Call(func, args, NULL); - Py_DECREF(args); - return result; -} -#endif - -/* BytesEquals */ -static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { -#if CYTHON_COMPILING_IN_PYPY - return PyObject_RichCompareBool(s1, s2, equals); -#else - if (s1 == s2) { - return (equals == Py_EQ); - } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { - const char *ps1, *ps2; - Py_ssize_t length = PyBytes_GET_SIZE(s1); - if (length != PyBytes_GET_SIZE(s2)) - return (equals == Py_NE); - ps1 = PyBytes_AS_STRING(s1); - ps2 = PyBytes_AS_STRING(s2); - if (ps1[0] != ps2[0]) { - return (equals == Py_NE); - } else if (length == 1) { - return (equals == Py_EQ); - } else { - int result; -#if CYTHON_USE_UNICODE_INTERNALS - Py_hash_t hash1, hash2; - hash1 = ((PyBytesObject*)s1)->ob_shash; - hash2 = ((PyBytesObject*)s2)->ob_shash; - if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { - return (equals == Py_NE); - } -#endif - result = memcmp(ps1, ps2, (size_t)length); - return (equals == Py_EQ) ? 
(result == 0) : (result != 0); - } - } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { - return (equals == Py_NE); - } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { - return (equals == Py_NE); - } else { - int result; - PyObject* py_result = PyObject_RichCompare(s1, s2, equals); - if (!py_result) - return -1; - result = __Pyx_PyObject_IsTrue(py_result); - Py_DECREF(py_result); - return result; - } -#endif -} - -/* UnicodeEquals */ -static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { -#if CYTHON_COMPILING_IN_PYPY - return PyObject_RichCompareBool(s1, s2, equals); -#else -#if PY_MAJOR_VERSION < 3 - PyObject* owned_ref = NULL; -#endif - int s1_is_unicode, s2_is_unicode; - if (s1 == s2) { - goto return_eq; - } - s1_is_unicode = PyUnicode_CheckExact(s1); - s2_is_unicode = PyUnicode_CheckExact(s2); -#if PY_MAJOR_VERSION < 3 - if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { - owned_ref = PyUnicode_FromObject(s2); - if (unlikely(!owned_ref)) - return -1; - s2 = owned_ref; - s2_is_unicode = 1; - } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { - owned_ref = PyUnicode_FromObject(s1); - if (unlikely(!owned_ref)) - return -1; - s1 = owned_ref; - s1_is_unicode = 1; - } else if (((!s2_is_unicode) & (!s1_is_unicode))) { - return __Pyx_PyBytes_Equals(s1, s2, equals); - } -#endif - if (s1_is_unicode & s2_is_unicode) { - Py_ssize_t length; - int kind; - void *data1, *data2; - if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) - return -1; - length = __Pyx_PyUnicode_GET_LENGTH(s1); - if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { - goto return_ne; - } -#if CYTHON_USE_UNICODE_INTERNALS - { - Py_hash_t hash1, hash2; - #if CYTHON_PEP393_ENABLED - hash1 = ((PyASCIIObject*)s1)->hash; - hash2 = ((PyASCIIObject*)s2)->hash; - #else - hash1 = ((PyUnicodeObject*)s1)->hash; - hash2 = ((PyUnicodeObject*)s2)->hash; - #endif - if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { - goto return_ne; - } - } -#endif - kind = __Pyx_PyUnicode_KIND(s1); - if (kind != __Pyx_PyUnicode_KIND(s2)) { - goto return_ne; - } - data1 = __Pyx_PyUnicode_DATA(s1); - data2 = __Pyx_PyUnicode_DATA(s2); - if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { - goto return_ne; - } else if (length == 1) { - goto return_eq; - } else { - int result = memcmp(data1, data2, (size_t)(length * kind)); - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - return (equals == Py_EQ) ? 
(result == 0) : (result != 0); - } - } else if ((s1 == Py_None) & s2_is_unicode) { - goto return_ne; - } else if ((s2 == Py_None) & s1_is_unicode) { - goto return_ne; - } else { - int result; - PyObject* py_result = PyObject_RichCompare(s1, s2, equals); - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - if (!py_result) - return -1; - result = __Pyx_PyObject_IsTrue(py_result); - Py_DECREF(py_result); - return result; - } -return_eq: - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - return (equals == Py_EQ); -return_ne: - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(owned_ref); - #endif - return (equals == Py_NE); -#endif -} - -/* None */ -static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) { - Py_ssize_t q = a / b; - Py_ssize_t r = a - q*b; - q -= ((r != 0) & ((r ^ b) < 0)); - return q; -} - -/* GetAttr */ -static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { -#if CYTHON_USE_TYPE_SLOTS -#if PY_MAJOR_VERSION >= 3 - if (likely(PyUnicode_Check(n))) -#else - if (likely(PyString_Check(n))) -#endif - return __Pyx_PyObject_GetAttrStr(o, n); -#endif - return PyObject_GetAttr(o, n); -} - -/* GetItemInt */ -static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { - PyObject *r; - if (!j) return NULL; - r = PyObject_GetItem(o, j); - Py_DECREF(j); - return r; -} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - Py_ssize_t wrapped_i = i; - if (wraparound & unlikely(i < 0)) { - wrapped_i += PyList_GET_SIZE(o); - } - if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { - PyObject *r = PyList_GET_ITEM(o, wrapped_i); - Py_INCREF(r); - return r; - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -#else - return PySequence_GetItem(o, i); -#endif -} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS - Py_ssize_t wrapped_i = i; - if (wraparound & unlikely(i < 0)) { - wrapped_i += PyTuple_GET_SIZE(o); - } - if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { - PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); - Py_INCREF(r); - return r; - } - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -#else - return PySequence_GetItem(o, i); -#endif -} -static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, - CYTHON_NCP_UNUSED int wraparound, - CYTHON_NCP_UNUSED int boundscheck) { -#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS - if (is_list || PyList_CheckExact(o)) { - Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); - if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { - PyObject *r = PyList_GET_ITEM(o, n); - Py_INCREF(r); - return r; - } - } - else if (PyTuple_CheckExact(o)) { - Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o); - if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { - PyObject *r = PyTuple_GET_ITEM(o, n); - Py_INCREF(r); - return r; - } - } else { - PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; - if (likely(m && m->sq_item)) { - if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { - Py_ssize_t l = m->sq_length(o); - if (likely(l >= 0)) { - i += l; - } else { - if (!PyErr_ExceptionMatches(PyExc_OverflowError)) - return NULL; - PyErr_Clear(); - } - } - return m->sq_item(o, i); - } - } -#else - if (is_list || PySequence_Check(o)) { - return PySequence_GetItem(o, i); - } -#endif - return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); -} - -/* ObjectGetItem */ -#if CYTHON_USE_TYPE_SLOTS -static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) { - PyObject *runerr; - Py_ssize_t key_value; - PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence; - if (unlikely(!(m && m->sq_item))) { - PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name); - return NULL; - } - key_value = __Pyx_PyIndex_AsSsize_t(index); - if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { - return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); - } - if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { - PyErr_Clear(); - PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name); - } - return NULL; -} -static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) { - PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping; - if (likely(m && m->mp_subscript)) { - return m->mp_subscript(obj, key); - } - return __Pyx_PyObject_GetIndex(obj, key); -} -#endif - -/* decode_c_string */ -static CYTHON_INLINE PyObject* __Pyx_decode_c_string( - const char* cstring, Py_ssize_t start, Py_ssize_t stop, - const char* encoding, const char* errors, - PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { - Py_ssize_t length; - if (unlikely((start < 0) | (stop < 0))) { - size_t slen = strlen(cstring); - if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) { - PyErr_SetString(PyExc_OverflowError, - "c-string too long to convert to Python"); - return NULL; - } - length = (Py_ssize_t) slen; - if (start < 0) { - start += length; - if (start < 0) - start = 0; - } - if (stop < 0) - stop += length; - } - if (unlikely(stop <= start)) - return __Pyx_NewRef(__pyx_empty_unicode); - length = stop - start; - cstring += start; - if (decode_func) { - return decode_func(cstring, length, errors); - } else { - return PyUnicode_Decode(cstring, length, encoding, errors); - } -} - -/* PyErrExceptionMatches */ -#if CYTHON_FAST_THREAD_STATE -static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { - Py_ssize_t i, n; - n = PyTuple_GET_SIZE(tuple); -#if PY_MAJOR_VERSION >= 3 - for (i=0; i<n; i++) { - if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; - } -#endif - for (i=0; i<n; i++) { - if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; - } - return 0; -} -static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { - PyObject *exc_type = tstate->curexc_type; - if (exc_type == err) return 1; - if (unlikely(!exc_type)) return 0; - if (unlikely(PyTuple_Check(err))) - return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); - return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); -} -#endif - -/* GetAttr3 */ -static PyObject *__Pyx_GetAttr3Default(PyObject *d) { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) - return NULL; - __Pyx_PyErr_Clear(); - Py_INCREF(d); - return d; -} -static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { - PyObject *r = __Pyx_GetAttr(o, n); - return (likely(r)) ?
r : __Pyx_GetAttr3Default(d); -} - -/* PyDictVersioning */ -#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS -static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { - PyObject *dict = Py_TYPE(obj)->tp_dict; - return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; -} -static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { - PyObject **dictptr = NULL; - Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; - if (offset) { -#if CYTHON_COMPILING_IN_CPYTHON - dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); -#else - dictptr = _PyObject_GetDictPtr(obj); -#endif - } - return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0; -} -static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { - PyObject *dict = Py_TYPE(obj)->tp_dict; - if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) - return 0; - return obj_dict_version == __Pyx_get_object_dict_version(obj); -} -#endif - -/* GetModuleGlobalName */ -#if CYTHON_USE_DICT_VERSIONS -static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) -#else -static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) -#endif -{ - PyObject *result; -#if !CYTHON_AVOID_BORROWED_REFS -#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 - result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } else if (unlikely(PyErr_Occurred())) { - return NULL; - } -#else - result = PyDict_GetItem(__pyx_d, name); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } -#endif -#else - result = PyObject_GetItem(__pyx_d, name); - __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) - if (likely(result)) { - return __Pyx_NewRef(result); - } - PyErr_Clear(); -#endif - return __Pyx_GetBuiltinName(name); -} - -/* RaiseTooManyValuesToUnpack */ -static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { - PyErr_Format(PyExc_ValueError, - "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); -} - -/* RaiseNeedMoreValuesToUnpack */ -static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { - PyErr_Format(PyExc_ValueError, - "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", - index, (index == 1) ? 
"" : "s"); -} - -/* RaiseNoneIterError */ -static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { - PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); -} - -/* ExtTypeTest */ -static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { - if (unlikely(!type)) { - PyErr_SetString(PyExc_SystemError, "Missing type object"); - return 0; - } - if (likely(__Pyx_TypeCheck(obj, type))) - return 1; - PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", - Py_TYPE(obj)->tp_name, type->tp_name); - return 0; -} - -/* GetTopmostException */ -#if CYTHON_USE_EXC_INFO_STACK -static _PyErr_StackItem * -__Pyx_PyErr_GetTopmostException(PyThreadState *tstate) -{ - _PyErr_StackItem *exc_info = tstate->exc_info; - while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && - exc_info->previous_item != NULL) - { - exc_info = exc_info->previous_item; - } - return exc_info; -} -#endif - -/* SaveResetException */ -#if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { - #if CYTHON_USE_EXC_INFO_STACK - _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); - *type = exc_info->exc_type; - *value = exc_info->exc_value; - *tb = exc_info->exc_traceback; - #else - *type = tstate->exc_type; - *value = tstate->exc_value; - *tb = tstate->exc_traceback; - #endif - Py_XINCREF(*type); - Py_XINCREF(*value); - Py_XINCREF(*tb); -} -static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - #if CYTHON_USE_EXC_INFO_STACK - _PyErr_StackItem *exc_info = tstate->exc_info; - tmp_type = exc_info->exc_type; - tmp_value = exc_info->exc_value; - tmp_tb = exc_info->exc_traceback; - exc_info->exc_type = type; - exc_info->exc_value = value; - exc_info->exc_traceback = tb; - #else - tmp_type = tstate->exc_type; - tmp_value = tstate->exc_value; - tmp_tb = tstate->exc_traceback; - tstate->exc_type = type; - tstate->exc_value = value; - tstate->exc_traceback = tb; - #endif - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -} -#endif - -/* GetException */ -#if CYTHON_FAST_THREAD_STATE -static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) -#else -static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) -#endif -{ - PyObject *local_type, *local_value, *local_tb; -#if CYTHON_FAST_THREAD_STATE - PyObject *tmp_type, *tmp_value, *tmp_tb; - local_type = tstate->curexc_type; - local_value = tstate->curexc_value; - local_tb = tstate->curexc_traceback; - tstate->curexc_type = 0; - tstate->curexc_value = 0; - tstate->curexc_traceback = 0; -#else - PyErr_Fetch(&local_type, &local_value, &local_tb); -#endif - PyErr_NormalizeException(&local_type, &local_value, &local_tb); -#if CYTHON_FAST_THREAD_STATE - if (unlikely(tstate->curexc_type)) -#else - if (unlikely(PyErr_Occurred())) -#endif - goto bad; - #if PY_MAJOR_VERSION >= 3 - if (local_tb) { - if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) - goto bad; - } - #endif - Py_XINCREF(local_tb); - Py_XINCREF(local_type); - Py_XINCREF(local_value); - *type = local_type; - *value = local_value; - *tb = local_tb; -#if CYTHON_FAST_THREAD_STATE - #if CYTHON_USE_EXC_INFO_STACK - { - _PyErr_StackItem *exc_info = tstate->exc_info; - tmp_type = exc_info->exc_type; - tmp_value = exc_info->exc_value; - tmp_tb = 
exc_info->exc_traceback; - exc_info->exc_type = local_type; - exc_info->exc_value = local_value; - exc_info->exc_traceback = local_tb; - } - #else - tmp_type = tstate->exc_type; - tmp_value = tstate->exc_value; - tmp_tb = tstate->exc_traceback; - tstate->exc_type = local_type; - tstate->exc_value = local_value; - tstate->exc_traceback = local_tb; - #endif - Py_XDECREF(tmp_type); - Py_XDECREF(tmp_value); - Py_XDECREF(tmp_tb); -#else - PyErr_SetExcInfo(local_type, local_value, local_tb); -#endif - return 0; -bad: - *type = 0; - *value = 0; - *tb = 0; - Py_XDECREF(local_type); - Py_XDECREF(local_value); - Py_XDECREF(local_tb); - return -1; -} - -/* SwapException */ -#if CYTHON_FAST_THREAD_STATE -static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - #if CYTHON_USE_EXC_INFO_STACK - _PyErr_StackItem *exc_info = tstate->exc_info; - tmp_type = exc_info->exc_type; - tmp_value = exc_info->exc_value; - tmp_tb = exc_info->exc_traceback; - exc_info->exc_type = *type; - exc_info->exc_value = *value; - exc_info->exc_traceback = *tb; - #else - tmp_type = tstate->exc_type; - tmp_value = tstate->exc_value; - tmp_tb = tstate->exc_traceback; - tstate->exc_type = *type; - tstate->exc_value = *value; - tstate->exc_traceback = *tb; - #endif - *type = tmp_type; - *value = tmp_value; - *tb = tmp_tb; -} -#else -static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { - PyObject *tmp_type, *tmp_value, *tmp_tb; - PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); - PyErr_SetExcInfo(*type, *value, *tb); - *type = tmp_type; - *value = tmp_value; - *tb = tmp_tb; -} -#endif - -/* Import */ -static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { - PyObject *empty_list = 0; - PyObject *module = 0; - PyObject *global_dict = 0; - PyObject *empty_dict = 0; - PyObject *list; - #if PY_MAJOR_VERSION < 3 - PyObject *py_import; - py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); - if (!py_import) - goto bad; - #endif - if (from_list) - list = from_list; - else { - empty_list = PyList_New(0); - if (!empty_list) - goto bad; - list = empty_list; - } - global_dict = PyModule_GetDict(__pyx_m); - if (!global_dict) - goto bad; - empty_dict = PyDict_New(); - if (!empty_dict) - goto bad; - { - #if PY_MAJOR_VERSION >= 3 - if (level == -1) { - if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { - module = PyImport_ImportModuleLevelObject( - name, global_dict, empty_dict, list, 1); - if (!module) { - if (!PyErr_ExceptionMatches(PyExc_ImportError)) - goto bad; - PyErr_Clear(); - } - } - level = 0; - } - #endif - if (!module) { - #if PY_MAJOR_VERSION < 3 - PyObject *py_level = PyInt_FromLong(level); - if (!py_level) - goto bad; - module = PyObject_CallFunctionObjArgs(py_import, - name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); - Py_DECREF(py_level); - #else - module = PyImport_ImportModuleLevelObject( - name, global_dict, empty_dict, list, level); - #endif - } - } -bad: - #if PY_MAJOR_VERSION < 3 - Py_XDECREF(py_import); - #endif - Py_XDECREF(empty_list); - Py_XDECREF(empty_dict); - return module; -} - -/* FastTypeChecks */ -#if CYTHON_COMPILING_IN_CPYTHON -static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { - while (a) { - a = a->tp_base; - if (a == b) - return 1; - } - return b == &PyBaseObject_Type; -} -static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { - PyObject *mro; - if (a == b) return 1; - mro = 
a->tp_mro; - if (likely(mro)) { - Py_ssize_t i, n; - n = PyTuple_GET_SIZE(mro); - for (i = 0; i < n; i++) { - if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) - return 1; - } - return 0; - } - return __Pyx_InBases(a, b); -} -#if PY_MAJOR_VERSION == 2 -static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { - PyObject *exception, *value, *tb; - int res; - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - __Pyx_ErrFetch(&exception, &value, &tb); - res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; - if (unlikely(res == -1)) { - PyErr_WriteUnraisable(err); - res = 0; - } - if (!res) { - res = PyObject_IsSubclass(err, exc_type2); - if (unlikely(res == -1)) { - PyErr_WriteUnraisable(err); - res = 0; - } - } - __Pyx_ErrRestore(exception, value, tb); - return res; -} -#else -static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { - int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; - if (!res) { - res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); - } - return res; -} -#endif -static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { - Py_ssize_t i, n; - assert(PyExceptionClass_Check(exc_type)); - n = PyTuple_GET_SIZE(tuple); -#if PY_MAJOR_VERSION >= 3 - for (i=0; i<n; i++) { - if (likely(PyTuple_GET_ITEM(tuple, i) == exc_type)) return 1; - } -#endif - for (i=0; i<n; i++) { - PyObject *t = PyTuple_GET_ITEM(tuple, i); - #if PY_MAJOR_VERSION < 3 - if (likely(exc_type == t)) return 1; - #endif - if (likely(PyExceptionClass_Check(t))) { - if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; - } else { - } - } - return 0; -} -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { - if (likely(err == exc_type)) return 1; - if (likely(PyExceptionClass_Check(err))) { - if (likely(PyExceptionClass_Check(exc_type))) { - return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); - } else if (likely(PyTuple_Check(exc_type))) { - return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); - } else { - } - } - return PyObject_IsSubclass(err, exc_type); -} -static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { - assert(PyExceptionClass_Check(exc_type1)); - assert(PyExceptionClass_Check(exc_type2)); - if (likely(err == exc_type1 || err == exc_type2)) return 1; - if (likely(PyExceptionClass_Check(err))) { - return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); - } - return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); -} -#endif - -/* PyIntBinop */ -#if !CYTHON_COMPILING_IN_PYPY -static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) { - (void)inplace; - (void)zerodivision_check; - #if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(op1))) { - const long b = intval; - long x; - long a = PyInt_AS_LONG(op1); - x = (long)((unsigned long)a + b); - if (likely((x^a) >= 0 || (x^b) >= 0)) - return PyInt_FromLong(x); - return PyLong_Type.tp_as_number->nb_add(op1, op2); - } - #endif - #if CYTHON_USE_PYLONG_INTERNALS - if (likely(PyLong_CheckExact(op1))) { - const long b = intval; - long a, x; -#ifdef HAVE_LONG_LONG - const PY_LONG_LONG llb = intval; - PY_LONG_LONG lla, llx; -#endif - const digit* digits = ((PyLongObject*)op1)->ob_digit; - const Py_ssize_t size = Py_SIZE(op1); - if (likely(__Pyx_sst_abs(size) <= 1)) { - a = likely(size) ?
digits[0] : 0; - if (size == -1) a = -a; - } else { - switch (size) { - case -2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - case 2: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - case -3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - case 3: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - case -4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { - lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - case 4: - if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); - break; -#ifdef HAVE_LONG_LONG - } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { - lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); - goto long_long; -#endif - } - CYTHON_FALLTHROUGH; - default: return PyLong_Type.tp_as_number->nb_add(op1, op2); - } - } - x = a + b; - return PyLong_FromLong(x); -#ifdef HAVE_LONG_LONG - long_long: - llx = lla + llb; - return PyLong_FromLongLong(llx); -#endif - - - } - #endif - if (PyFloat_CheckExact(op1)) { - const long b = intval; - double a = PyFloat_AS_DOUBLE(op1); - double result; - PyFPE_START_PROTECT("add", return NULL) - 
result = ((double)a) + (double)b; - PyFPE_END_PROTECT(result) - return PyFloat_FromDouble(result); - } - return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); -} -#endif - -/* None */ -static CYTHON_INLINE long __Pyx_div_long(long a, long b) { - long q = a / b; - long r = a - q*b; - q -= ((r != 0) & ((r ^ b) < 0)); - return q; -} - -/* ImportFrom */ -static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { - PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); - if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { - PyErr_Format(PyExc_ImportError, - #if PY_MAJOR_VERSION < 3 - "cannot import name %.230s", PyString_AS_STRING(name)); - #else - "cannot import name %S", name); - #endif - } - return value; -} - -/* HasAttr */ -static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { - PyObject *r; - if (unlikely(!__Pyx_PyBaseString_Check(n))) { - PyErr_SetString(PyExc_TypeError, - "hasattr(): attribute name must be string"); - return -1; - } - r = __Pyx_GetAttr(o, n); - if (unlikely(!r)) { - PyErr_Clear(); - return 0; - } else { - Py_DECREF(r); - return 1; - } -} - -/* PyObject_GenericGetAttrNoDict */ -#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 -static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { - PyErr_Format(PyExc_AttributeError, -#if PY_MAJOR_VERSION >= 3 - "'%.50s' object has no attribute '%U'", - tp->tp_name, attr_name); -#else - "'%.50s' object has no attribute '%.400s'", - tp->tp_name, PyString_AS_STRING(attr_name)); -#endif - return NULL; -} -static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { - PyObject *descr; - PyTypeObject *tp = Py_TYPE(obj); - if (unlikely(!PyString_Check(attr_name))) { - return PyObject_GenericGetAttr(obj, attr_name); - } - assert(!tp->tp_dictoffset); - descr = _PyType_Lookup(tp, attr_name); - if (unlikely(!descr)) { - return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); - } - Py_INCREF(descr); - #if PY_MAJOR_VERSION < 3 - if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) - #endif - { - descrgetfunc f = Py_TYPE(descr)->tp_descr_get; - if (unlikely(f)) { - PyObject *res = f(descr, obj, (PyObject *)tp); - Py_DECREF(descr); - return res; - } - } - return descr; -} -#endif - -/* PyObject_GenericGetAttr */ -#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 -static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { - if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { - return PyObject_GenericGetAttr(obj, attr_name); - } - return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); -} -#endif - -/* SetVTable */ -static int __Pyx_SetVtable(PyObject *dict, void *vtable) { -#if PY_VERSION_HEX >= 0x02070000 - PyObject *ob = PyCapsule_New(vtable, 0, 0); -#else - PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); -#endif - if (!ob) - goto bad; - if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) - goto bad; - Py_DECREF(ob); - return 0; -bad: - Py_XDECREF(ob); - return -1; -} - -/* PyObjectGetAttrStrNoError */ -static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { - __Pyx_PyThreadState_declare - __Pyx_PyThreadState_assign - if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) - __Pyx_PyErr_Clear(); -} -static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { - PyObject *result; -#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS 
&& PY_VERSION_HEX >= 0x030700B1 - PyTypeObject* tp = Py_TYPE(obj); - if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { - return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); - } -#endif - result = __Pyx_PyObject_GetAttrStr(obj, attr_name); - if (unlikely(!result)) { - __Pyx_PyObject_GetAttrStr_ClearAttributeError(); - } - return result; -} - -/* SetupReduce */ -static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { - int ret; - PyObject *name_attr; - name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2); - if (likely(name_attr)) { - ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); - } else { - ret = -1; - } - if (unlikely(ret < 0)) { - PyErr_Clear(); - ret = 0; - } - Py_XDECREF(name_attr); - return ret; -} -static int __Pyx_setup_reduce(PyObject* type_obj) { - int ret = 0; - PyObject *object_reduce = NULL; - PyObject *object_reduce_ex = NULL; - PyObject *reduce = NULL; - PyObject *reduce_ex = NULL; - PyObject *reduce_cython = NULL; - PyObject *setstate = NULL; - PyObject *setstate_cython = NULL; -#if CYTHON_USE_PYTYPE_LOOKUP - if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; -#else - if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD; -#endif -#if CYTHON_USE_PYTYPE_LOOKUP - object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; -#else - object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; -#endif - reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD; - if (reduce_ex == object_reduce_ex) { -#if CYTHON_USE_PYTYPE_LOOKUP - object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; -#else - object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; -#endif - reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD; - if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { - reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython); - if (likely(reduce_cython)) { - ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; - ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; - } else if (reduce == object_reduce || PyErr_Occurred()) { - goto __PYX_BAD; - } - setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); - if (!setstate) PyErr_Clear(); - if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { - setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython); - if (likely(setstate_cython)) { - ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; - ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; - } else if (!setstate || PyErr_Occurred()) { - goto __PYX_BAD; - } - } - PyType_Modified((PyTypeObject*)type_obj); - } - } - goto __PYX_GOOD; -__PYX_BAD: - if (!PyErr_Occurred()) - PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); - ret = -1; -__PYX_GOOD: 
-#if !CYTHON_USE_PYTYPE_LOOKUP - Py_XDECREF(object_reduce); - Py_XDECREF(object_reduce_ex); -#endif - Py_XDECREF(reduce); - Py_XDECREF(reduce_ex); - Py_XDECREF(reduce_cython); - Py_XDECREF(setstate); - Py_XDECREF(setstate_cython); - return ret; -} - -/* CLineInTraceback */ -#ifndef CYTHON_CLINE_IN_TRACEBACK -static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) { - PyObject *use_cline; - PyObject *ptype, *pvalue, *ptraceback; -#if CYTHON_COMPILING_IN_CPYTHON - PyObject **cython_runtime_dict; -#endif - if (unlikely(!__pyx_cython_runtime)) { - return c_line; - } - __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); -#if CYTHON_COMPILING_IN_CPYTHON - cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); - if (likely(cython_runtime_dict)) { - __PYX_PY_DICT_LOOKUP_IF_MODIFIED( - use_cline, *cython_runtime_dict, - __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) - } else -#endif - { - PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); - if (use_cline_obj) { - use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; - Py_DECREF(use_cline_obj); - } else { - PyErr_Clear(); - use_cline = NULL; - } - } - if (!use_cline) { - c_line = 0; - PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); - } - else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { - c_line = 0; - } - __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); - return c_line; -} -#endif - -/* CodeObjectCache */ -static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { - int start = 0, mid = 0, end = count - 1; - if (end >= 0 && code_line > entries[end].code_line) { - return count; - } - while (start < end) { - mid = start + (end - start) / 2; - if (code_line < entries[mid].code_line) { - end = mid; - } else if (code_line > entries[mid].code_line) { - start = mid + 1; - } else { - return mid; - } - } - if (code_line <= entries[mid].code_line) { - return mid; - } else { - return mid + 1; - } -} -static PyCodeObject *__pyx_find_code_object(int code_line) { - PyCodeObject* code_object; - int pos; - if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { - return NULL; - } - pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); - if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { - return NULL; - } - code_object = __pyx_code_cache.entries[pos].code_object; - Py_INCREF(code_object); - return code_object; -} -static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { - int pos, i; - __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; - if (unlikely(!code_line)) { - return; - } - if (unlikely(!entries)) { - entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); - if (likely(entries)) { - __pyx_code_cache.entries = entries; - __pyx_code_cache.max_count = 64; - __pyx_code_cache.count = 1; - entries[0].code_line = code_line; - entries[0].code_object = code_object; - Py_INCREF(code_object); - } - return; - } - pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); - if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { - PyCodeObject* tmp = entries[pos].code_object; - entries[pos].code_object = code_object; - Py_DECREF(tmp); - 
return; - } - if (__pyx_code_cache.count == __pyx_code_cache.max_count) { - int new_max = __pyx_code_cache.max_count + 64; - entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( - __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); - if (unlikely(!entries)) { - return; - } - __pyx_code_cache.entries = entries; - __pyx_code_cache.max_count = new_max; - } - for (i=__pyx_code_cache.count; i>pos; i--) { - entries[i] = entries[i-1]; - } - entries[pos].code_line = code_line; - entries[pos].code_object = code_object; - __pyx_code_cache.count++; - Py_INCREF(code_object); -} - -/* AddTraceback */ -#include "compile.h" -#include "frameobject.h" -#include "traceback.h" -static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( - const char *funcname, int c_line, - int py_line, const char *filename) { - PyCodeObject *py_code = 0; - PyObject *py_srcfile = 0; - PyObject *py_funcname = 0; - #if PY_MAJOR_VERSION < 3 - py_srcfile = PyString_FromString(filename); - #else - py_srcfile = PyUnicode_FromString(filename); - #endif - if (!py_srcfile) goto bad; - if (c_line) { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); - #else - py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); - #endif - } - else { - #if PY_MAJOR_VERSION < 3 - py_funcname = PyString_FromString(funcname); - #else - py_funcname = PyUnicode_FromString(funcname); - #endif - } - if (!py_funcname) goto bad; - py_code = __Pyx_PyCode_New( - 0, - 0, - 0, - 0, - 0, - __pyx_empty_bytes, /*PyObject *code,*/ - __pyx_empty_tuple, /*PyObject *consts,*/ - __pyx_empty_tuple, /*PyObject *names,*/ - __pyx_empty_tuple, /*PyObject *varnames,*/ - __pyx_empty_tuple, /*PyObject *freevars,*/ - __pyx_empty_tuple, /*PyObject *cellvars,*/ - py_srcfile, /*PyObject *filename,*/ - py_funcname, /*PyObject *name,*/ - py_line, - __pyx_empty_bytes /*PyObject *lnotab*/ - ); - Py_DECREF(py_srcfile); - Py_DECREF(py_funcname); - return py_code; -bad: - Py_XDECREF(py_srcfile); - Py_XDECREF(py_funcname); - return NULL; -} -static void __Pyx_AddTraceback(const char *funcname, int c_line, - int py_line, const char *filename) { - PyCodeObject *py_code = 0; - PyFrameObject *py_frame = 0; - PyThreadState *tstate = __Pyx_PyThreadState_Current; - if (c_line) { - c_line = __Pyx_CLineForTraceback(tstate, c_line); - } - py_code = __pyx_find_code_object(c_line ? -c_line : py_line); - if (!py_code) { - py_code = __Pyx_CreateCodeObjectForTraceback( - funcname, c_line, py_line, filename); - if (!py_code) goto bad; - __pyx_insert_code_object(c_line ? 
-c_line : py_line, py_code); - } - py_frame = PyFrame_New( - tstate, /*PyThreadState *tstate,*/ - py_code, /*PyCodeObject *code,*/ - __pyx_d, /*PyObject *globals,*/ - 0 /*PyObject *locals*/ - ); - if (!py_frame) goto bad; - __Pyx_PyFrame_SetLineNumber(py_frame, py_line); - PyTraceBack_Here(py_frame); -bad: - Py_XDECREF(py_code); - Py_XDECREF(py_frame); -} - -#if PY_MAJOR_VERSION < 3 -static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { - if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); - if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); - if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); - PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); - return -1; -} -static void __Pyx_ReleaseBuffer(Py_buffer *view) { - PyObject *obj = view->obj; - if (!obj) return; - if (PyObject_CheckBuffer(obj)) { - PyBuffer_Release(view); - return; - } - if ((0)) {} - view->obj = NULL; - Py_DECREF(obj); -} -#endif - - -/* MemviewSliceIsContig */ -static int -__pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) -{ - int i, index, step, start; - Py_ssize_t itemsize = mvs.memview->view.itemsize; - if (order == 'F') { - step = 1; - start = 0; - } else { - step = -1; - start = ndim - 1; - } - for (i = 0; i < ndim; i++) { - index = start + step * i; - if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) - return 0; - itemsize *= mvs.shape[index]; - } - return 1; -} - -/* OverlappingSlices */ -static void -__pyx_get_array_memory_extents(__Pyx_memviewslice *slice, - void **out_start, void **out_end, - int ndim, size_t itemsize) -{ - char *start, *end; - int i; - start = end = slice->data; - for (i = 0; i < ndim; i++) { - Py_ssize_t stride = slice->strides[i]; - Py_ssize_t extent = slice->shape[i]; - if (extent == 0) { - *out_start = *out_end = start; - return; - } else { - if (stride > 0) - end += stride * (extent - 1); - else - start += stride * (extent - 1); - } - } - *out_start = start; - *out_end = end + itemsize; -} -static int -__pyx_slices_overlap(__Pyx_memviewslice *slice1, - __Pyx_memviewslice *slice2, - int ndim, size_t itemsize) -{ - void *start1, *end1, *start2, *end2; - __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); - __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); - return (start1 < end2) && (start2 < end1); -} - -/* Capsule */ -static CYTHON_INLINE PyObject * -__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig) -{ - PyObject *cobj; -#if PY_VERSION_HEX >= 0x02070000 - cobj = PyCapsule_New(p, sig, NULL); -#else - cobj = PyCObject_FromVoidPtr(p, NULL); -#endif - return cobj; -} - -/* IsLittleEndian */ -static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) -{ - union { - uint32_t u32; - uint8_t u8[4]; - } S; - S.u32 = 0x01020304; - return S.u8[0] == 4; -} - -/* BufferFormatCheck */ -static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, - __Pyx_BufFmt_StackElem* stack, - __Pyx_TypeInfo* type) { - stack[0].field = &ctx->root; - stack[0].parent_offset = 0; - ctx->root.type = type; - ctx->root.name = "buffer dtype"; - ctx->root.offset = 0; - ctx->head = stack; - ctx->head->field = &ctx->root; - ctx->fmt_offset = 0; - ctx->head->parent_offset = 0; - ctx->new_packmode = '@'; - ctx->enc_packmode = '@'; - ctx->new_count = 1; - ctx->enc_count = 0; - ctx->enc_type = 0; - ctx->is_complex = 0; - ctx->is_valid_array = 0; - 
ctx->struct_alignment = 0; - while (type->typegroup == 'S') { - ++ctx->head; - ctx->head->field = type->fields; - ctx->head->parent_offset = 0; - type = type->fields->type; - } -} -static int __Pyx_BufFmt_ParseNumber(const char** ts) { - int count; - const char* t = *ts; - if (*t < '0' || *t > '9') { - return -1; - } else { - count = *t++ - '0'; - while (*t >= '0' && *t <= '9') { - count *= 10; - count += *t++ - '0'; - } - } - *ts = t; - return count; -} -static int __Pyx_BufFmt_ExpectNumber(const char **ts) { - int number = __Pyx_BufFmt_ParseNumber(ts); - if (number == -1) - PyErr_Format(PyExc_ValueError,\ - "Does not understand character buffer dtype format string ('%c')", **ts); - return number; -} -static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { - PyErr_Format(PyExc_ValueError, - "Unexpected format string character: '%c'", ch); -} -static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { - switch (ch) { - case '?': return "'bool'"; - case 'c': return "'char'"; - case 'b': return "'signed char'"; - case 'B': return "'unsigned char'"; - case 'h': return "'short'"; - case 'H': return "'unsigned short'"; - case 'i': return "'int'"; - case 'I': return "'unsigned int'"; - case 'l': return "'long'"; - case 'L': return "'unsigned long'"; - case 'q': return "'long long'"; - case 'Q': return "'unsigned long long'"; - case 'f': return (is_complex ? "'complex float'" : "'float'"); - case 'd': return (is_complex ? "'complex double'" : "'double'"); - case 'g': return (is_complex ? "'complex long double'" : "'long double'"); - case 'T': return "a struct"; - case 'O': return "Python object"; - case 'P': return "a pointer"; - case 's': case 'p': return "a string"; - case 0: return "end"; - default: return "unparseable format string"; - } -} -static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; - case 'h': case 'H': return 2; - case 'i': case 'I': case 'l': case 'L': return 4; - case 'q': case 'Q': return 8; - case 'f': return (is_complex ? 8 : 4); - case 'd': return (is_complex ? 16 : 8); - case 'g': { - PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); - return 0; - } - case 'O': case 'P': return sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} -static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; - case 'h': case 'H': return sizeof(short); - case 'i': case 'I': return sizeof(int); - case 'l': case 'L': return sizeof(long); - #ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(PY_LONG_LONG); - #endif - case 'f': return sizeof(float) * (is_complex ? 2 : 1); - case 'd': return sizeof(double) * (is_complex ? 2 : 1); - case 'g': return sizeof(long double) * (is_complex ? 
2 : 1); - case 'O': case 'P': return sizeof(void*); - default: { - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } - } -} -typedef struct { char c; short x; } __Pyx_st_short; -typedef struct { char c; int x; } __Pyx_st_int; -typedef struct { char c; long x; } __Pyx_st_long; -typedef struct { char c; float x; } __Pyx_st_float; -typedef struct { char c; double x; } __Pyx_st_double; -typedef struct { char c; long double x; } __Pyx_st_longdouble; -typedef struct { char c; void *x; } __Pyx_st_void_p; -#ifdef HAVE_LONG_LONG -typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; -#endif -static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; - case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); - case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); - case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); -#ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); -#endif - case 'f': return sizeof(__Pyx_st_float) - sizeof(float); - case 'd': return sizeof(__Pyx_st_double) - sizeof(double); - case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); - case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} -/* These are for computing the padding at the end of the struct to align - on the first member of the struct. This will probably the same as above, - but we don't have any guarantees. - */ -typedef struct { short x; char c; } __Pyx_pad_short; -typedef struct { int x; char c; } __Pyx_pad_int; -typedef struct { long x; char c; } __Pyx_pad_long; -typedef struct { float x; char c; } __Pyx_pad_float; -typedef struct { double x; char c; } __Pyx_pad_double; -typedef struct { long double x; char c; } __Pyx_pad_longdouble; -typedef struct { void *x; char c; } __Pyx_pad_void_p; -#ifdef HAVE_LONG_LONG -typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; -#endif -static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { - switch (ch) { - case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; - case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); - case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); - case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); -#ifdef HAVE_LONG_LONG - case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); -#endif - case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); - case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); - case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); - case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); - default: - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } -} -static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { - switch (ch) { - case 'c': - return 'H'; - case 'b': case 'h': case 'i': - case 'l': case 'q': case 's': case 'p': - return 'I'; - case '?': case 'B': case 'H': case 'I': case 'L': case 'Q': - return 'U'; - case 'f': case 'd': case 'g': - return (is_complex ? 
'C' : 'R'); - case 'O': - return 'O'; - case 'P': - return 'P'; - default: { - __Pyx_BufFmt_RaiseUnexpectedChar(ch); - return 0; - } - } -} -static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { - if (ctx->head == NULL || ctx->head->field == &ctx->root) { - const char* expected; - const char* quote; - if (ctx->head == NULL) { - expected = "end"; - quote = ""; - } else { - expected = ctx->head->field->type->name; - quote = "'"; - } - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch, expected %s%s%s but got %s", - quote, expected, quote, - __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); - } else { - __Pyx_StructField* field = ctx->head->field; - __Pyx_StructField* parent = (ctx->head - 1)->field; - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", - field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), - parent->type->name, field->name); - } -} -static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { - char group; - size_t size, offset, arraysize = 1; - if (ctx->enc_type == 0) return 0; - if (ctx->head->field->type->arraysize[0]) { - int i, ndim = 0; - if (ctx->enc_type == 's' || ctx->enc_type == 'p') { - ctx->is_valid_array = ctx->head->field->type->ndim == 1; - ndim = 1; - if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { - PyErr_Format(PyExc_ValueError, - "Expected a dimension of size %zu, got %zu", - ctx->head->field->type->arraysize[0], ctx->enc_count); - return -1; - } - } - if (!ctx->is_valid_array) { - PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", - ctx->head->field->type->ndim, ndim); - return -1; - } - for (i = 0; i < ctx->head->field->type->ndim; i++) { - arraysize *= ctx->head->field->type->arraysize[i]; - } - ctx->is_valid_array = 0; - ctx->enc_count = 1; - } - group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); - do { - __Pyx_StructField* field = ctx->head->field; - __Pyx_TypeInfo* type = field->type; - if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { - size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); - } else { - size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); - } - if (ctx->enc_packmode == '@') { - size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); - size_t align_mod_offset; - if (align_at == 0) return -1; - align_mod_offset = ctx->fmt_offset % align_at; - if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; - if (ctx->struct_alignment == 0) - ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, - ctx->is_complex); - } - if (type->size != size || type->typegroup != group) { - if (type->typegroup == 'C' && type->fields != NULL) { - size_t parent_offset = ctx->head->parent_offset + field->offset; - ++ctx->head; - ctx->head->field = type->fields; - ctx->head->parent_offset = parent_offset; - continue; - } - if ((type->typegroup == 'H' || group == 'H') && type->size == size) { - } else { - __Pyx_BufFmt_RaiseExpected(ctx); - return -1; - } - } - offset = ctx->head->parent_offset + field->offset; - if (ctx->fmt_offset != offset) { - PyErr_Format(PyExc_ValueError, - "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", - (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); - return -1; - } - ctx->fmt_offset += size; - if (arraysize) - ctx->fmt_offset += (arraysize - 1) * size; - --ctx->enc_count; - while (1) { - if 
(field == &ctx->root) { - ctx->head = NULL; - if (ctx->enc_count != 0) { - __Pyx_BufFmt_RaiseExpected(ctx); - return -1; - } - break; - } - ctx->head->field = ++field; - if (field->type == NULL) { - --ctx->head; - field = ctx->head->field; - continue; - } else if (field->type->typegroup == 'S') { - size_t parent_offset = ctx->head->parent_offset + field->offset; - if (field->type->fields->type == NULL) continue; - field = field->type->fields; - ++ctx->head; - ctx->head->field = field; - ctx->head->parent_offset = parent_offset; - break; - } else { - break; - } - } - } while (ctx->enc_count); - ctx->enc_type = 0; - ctx->is_complex = 0; - return 0; -} -static PyObject * -__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) -{ - const char *ts = *tsp; - int i = 0, number, ndim; - ++ts; - if (ctx->new_count != 1) { - PyErr_SetString(PyExc_ValueError, - "Cannot handle repeated arrays in format string"); - return NULL; - } - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ndim = ctx->head->field->type->ndim; - while (*ts && *ts != ')') { - switch (*ts) { - case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; - default: break; - } - number = __Pyx_BufFmt_ExpectNumber(&ts); - if (number == -1) return NULL; - if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) - return PyErr_Format(PyExc_ValueError, - "Expected a dimension of size %zu, got %d", - ctx->head->field->type->arraysize[i], number); - if (*ts != ',' && *ts != ')') - return PyErr_Format(PyExc_ValueError, - "Expected a comma in format string, got '%c'", *ts); - if (*ts == ',') ts++; - i++; - } - if (i != ndim) - return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", - ctx->head->field->type->ndim, i); - if (!*ts) { - PyErr_SetString(PyExc_ValueError, - "Unexpected end of format string, expected ')'"); - return NULL; - } - ctx->is_valid_array = 1; - ctx->new_count = 1; - *tsp = ++ts; - return Py_None; -} -static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { - int got_Z = 0; - while (1) { - switch(*ts) { - case 0: - if (ctx->enc_type != 0 && ctx->head == NULL) { - __Pyx_BufFmt_RaiseExpected(ctx); - return NULL; - } - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - if (ctx->head != NULL) { - __Pyx_BufFmt_RaiseExpected(ctx); - return NULL; - } - return ts; - case ' ': - case '\r': - case '\n': - ++ts; - break; - case '<': - if (!__Pyx_Is_Little_Endian()) { - PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); - return NULL; - } - ctx->new_packmode = '='; - ++ts; - break; - case '>': - case '!': - if (__Pyx_Is_Little_Endian()) { - PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); - return NULL; - } - ctx->new_packmode = '='; - ++ts; - break; - case '=': - case '@': - case '^': - ctx->new_packmode = *ts++; - break; - case 'T': - { - const char* ts_after_sub; - size_t i, struct_count = ctx->new_count; - size_t struct_alignment = ctx->struct_alignment; - ctx->new_count = 1; - ++ts; - if (*ts != '{') { - PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); - return NULL; - } - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->enc_type = 0; - ctx->enc_count = 0; - ctx->struct_alignment = 0; - ++ts; - ts_after_sub = ts; - for (i = 0; i != struct_count; ++i) { - ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); - if (!ts_after_sub) return NULL; - } - ts = ts_after_sub; - if 
(struct_alignment) ctx->struct_alignment = struct_alignment; - } - break; - case '}': - { - size_t alignment = ctx->struct_alignment; - ++ts; - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->enc_type = 0; - if (alignment && ctx->fmt_offset % alignment) { - ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); - } - } - return ts; - case 'x': - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->fmt_offset += ctx->new_count; - ctx->new_count = 1; - ctx->enc_count = 0; - ctx->enc_type = 0; - ctx->enc_packmode = ctx->new_packmode; - ++ts; - break; - case 'Z': - got_Z = 1; - ++ts; - if (*ts != 'f' && *ts != 'd' && *ts != 'g') { - __Pyx_BufFmt_RaiseUnexpectedChar('Z'); - return NULL; - } - CYTHON_FALLTHROUGH; - case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': - case 'l': case 'L': case 'q': case 'Q': - case 'f': case 'd': case 'g': - case 'O': case 'p': - if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) && - (ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) { - ctx->enc_count += ctx->new_count; - ctx->new_count = 1; - got_Z = 0; - ++ts; - break; - } - CYTHON_FALLTHROUGH; - case 's': - if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; - ctx->enc_count = ctx->new_count; - ctx->enc_packmode = ctx->new_packmode; - ctx->enc_type = *ts; - ctx->is_complex = got_Z; - ++ts; - ctx->new_count = 1; - got_Z = 0; - break; - case ':': - ++ts; - while(*ts != ':') ++ts; - ++ts; - break; - case '(': - if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; - break; - default: - { - int number = __Pyx_BufFmt_ExpectNumber(&ts); - if (number == -1) return NULL; - ctx->new_count = (size_t)number; - } - } - } -} - -/* TypeInfoCompare */ - static int -__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) -{ - int i; - if (!a || !b) - return 0; - if (a == b) - return 1; - if (a->size != b->size || a->typegroup != b->typegroup || - a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { - if (a->typegroup == 'H' || b->typegroup == 'H') { - return a->size == b->size; - } else { - return 0; - } - } - if (a->ndim) { - for (i = 0; i < a->ndim; i++) - if (a->arraysize[i] != b->arraysize[i]) - return 0; - } - if (a->typegroup == 'S') { - if (a->flags != b->flags) - return 0; - if (a->fields || b->fields) { - if (!(a->fields && b->fields)) - return 0; - for (i = 0; a->fields[i].type && b->fields[i].type; i++) { - __Pyx_StructField *field_a = a->fields + i; - __Pyx_StructField *field_b = b->fields + i; - if (field_a->offset != field_b->offset || - !__pyx_typeinfo_cmp(field_a->type, field_b->type)) - return 0; - } - return !a->fields[i].type && !b->fields[i].type; - } - } - return 1; -} - -/* MemviewSliceValidateAndInit */ - static int -__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) -{ - if (buf->shape[dim] <= 1) - return 1; - if (buf->strides) { - if (spec & __Pyx_MEMVIEW_CONTIG) { - if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { - if (unlikely(buf->strides[dim] != sizeof(void *))) { - PyErr_Format(PyExc_ValueError, - "Buffer is not indirectly contiguous " - "in dimension %d.", dim); - goto fail; - } - } else if (unlikely(buf->strides[dim] != buf->itemsize)) { - PyErr_SetString(PyExc_ValueError, - "Buffer and memoryview are not contiguous " - "in the same dimension."); - goto fail; - } - } - if (spec & __Pyx_MEMVIEW_FOLLOW) { - Py_ssize_t stride = buf->strides[dim]; - if (stride < 0) - stride = -stride; - if (unlikely(stride < buf->itemsize)) { - PyErr_SetString(PyExc_ValueError, - "Buffer and 
memoryview are not contiguous " - "in the same dimension."); - goto fail; - } - } - } else { - if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) { - PyErr_Format(PyExc_ValueError, - "C-contiguous buffer is not contiguous in " - "dimension %d", dim); - goto fail; - } else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) { - PyErr_Format(PyExc_ValueError, - "C-contiguous buffer is not indirect in " - "dimension %d", dim); - goto fail; - } else if (unlikely(buf->suboffsets)) { - PyErr_SetString(PyExc_ValueError, - "Buffer exposes suboffsets but no strides"); - goto fail; - } - } - return 1; -fail: - return 0; -} -static int -__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) -{ - if (spec & __Pyx_MEMVIEW_DIRECT) { - if (unlikely(buf->suboffsets && buf->suboffsets[dim] >= 0)) { - PyErr_Format(PyExc_ValueError, - "Buffer not compatible with direct access " - "in dimension %d.", dim); - goto fail; - } - } - if (spec & __Pyx_MEMVIEW_PTR) { - if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) { - PyErr_Format(PyExc_ValueError, - "Buffer is not indirectly accessible " - "in dimension %d.", dim); - goto fail; - } - } - return 1; -fail: - return 0; -} -static int -__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) -{ - int i; - if (c_or_f_flag & __Pyx_IS_F_CONTIG) { - Py_ssize_t stride = 1; - for (i = 0; i < ndim; i++) { - if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { - PyErr_SetString(PyExc_ValueError, - "Buffer not fortran contiguous."); - goto fail; - } - stride = stride * buf->shape[i]; - } - } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { - Py_ssize_t stride = 1; - for (i = ndim - 1; i >- 1; i--) { - if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { - PyErr_SetString(PyExc_ValueError, - "Buffer not C contiguous."); - goto fail; - } - stride = stride * buf->shape[i]; - } - } - return 1; -fail: - return 0; -} -static int __Pyx_ValidateAndInit_memviewslice( - int *axes_specs, - int c_or_f_flag, - int buf_flags, - int ndim, - __Pyx_TypeInfo *dtype, - __Pyx_BufFmt_StackElem stack[], - __Pyx_memviewslice *memviewslice, - PyObject *original_obj) -{ - struct __pyx_memoryview_obj *memview, *new_memview; - __Pyx_RefNannyDeclarations - Py_buffer *buf; - int i, spec = 0, retval = -1; - __Pyx_BufFmt_Context ctx; - int from_memoryview = __pyx_memoryview_check(original_obj); - __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); - if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) - original_obj)->typeinfo)) { - memview = (struct __pyx_memoryview_obj *) original_obj; - new_memview = NULL; - } else { - memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( - original_obj, buf_flags, 0, dtype); - new_memview = memview; - if (unlikely(!memview)) - goto fail; - } - buf = &memview->view; - if (unlikely(buf->ndim != ndim)) { - PyErr_Format(PyExc_ValueError, - "Buffer has wrong number of dimensions (expected %d, got %d)", - ndim, buf->ndim); - goto fail; - } - if (new_memview) { - __Pyx_BufFmt_Init(&ctx, stack, dtype); - if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail; - } - if (unlikely((unsigned) buf->itemsize != dtype->size)) { - PyErr_Format(PyExc_ValueError, - "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " - "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", - buf->itemsize, - (buf->itemsize > 1) ? "s" : "", - dtype->name, - dtype->size, - (dtype->size > 1) ? 
"s" : ""); - goto fail; - } - if (buf->len > 0) { - for (i = 0; i < ndim; i++) { - spec = axes_specs[i]; - if (unlikely(!__pyx_check_strides(buf, i, ndim, spec))) - goto fail; - if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec))) - goto fail; - } - if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))) - goto fail; - } - if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, - new_memview != NULL) == -1)) { - goto fail; - } - retval = 0; - goto no_fail; -fail: - Py_XDECREF(new_memview); - retval = -1; -no_fail: - __Pyx_RefNannyFinishContext(); - return retval; -} - -/* ObjectToMemviewSlice */ - static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_int(PyObject *obj, int writable_flag) { - __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_BufFmt_StackElem stack[1]; - int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; - int retcode; - if (obj == Py_None) { - result.memview = (struct __pyx_memoryview_obj *) Py_None; - return result; - } - retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, - (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 3, - &__Pyx_TypeInfo_int, stack, - &result, obj); - if (unlikely(retcode == -1)) - goto __pyx_fail; - return result; -__pyx_fail: - result.memview = NULL; - result.data = NULL; - return result; -} - -/* ObjectToMemviewSlice */ - static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_d_d_dc_float(PyObject *obj, int writable_flag) { - __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_BufFmt_StackElem stack[1]; - int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_FOLLOW), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; - int retcode; - if (obj == Py_None) { - result.memview = (struct __pyx_memoryview_obj *) Py_None; - return result; - } - retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, - (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 3, - &__Pyx_TypeInfo_float, stack, - &result, obj); - if (unlikely(retcode == -1)) - goto __pyx_fail; - return result; -__pyx_fail: - result.memview = NULL; - result.data = NULL; - return result; -} - -/* ObjectToMemviewSlice */ - static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dc_int(PyObject *obj, int writable_flag) { - __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; - __Pyx_BufFmt_StackElem stack[1]; - int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_CONTIG) }; - int retcode; - if (obj == Py_None) { - result.memview = (struct __pyx_memoryview_obj *) Py_None; - return result; - } - retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, __Pyx_IS_C_CONTIG, - (PyBUF_C_CONTIGUOUS | PyBUF_FORMAT) | writable_flag, 1, - &__Pyx_TypeInfo_int, stack, - &result, obj); - if (unlikely(retcode == -1)) - goto __pyx_fail; - return result; -__pyx_fail: - result.memview = NULL; - result.data = NULL; - return result; -} - -/* CIntToPy */ - static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { - const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; - const int is_unsigned = neg_one > const_zero; - if (is_unsigned) { - if (sizeof(int) < sizeof(long)) { - return PyInt_FromLong((long) value); - } else if (sizeof(int) <= sizeof(unsigned long)) { - return PyLong_FromUnsignedLong((unsigned long) value); -#ifdef HAVE_LONG_LONG - } else if 
(sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); -#endif - } - } else { - if (sizeof(int) <= sizeof(long)) { - return PyInt_FromLong((long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { - return PyLong_FromLongLong((PY_LONG_LONG) value); -#endif - } - } - { - int one = 1; int little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&value; - return _PyLong_FromByteArray(bytes, sizeof(int), - little, !is_unsigned); - } -} - -/* CIntFromPyVerify */ - #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ - __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) -#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ - __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) -#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ - {\ - func_type value = func_value;\ - if (sizeof(target_type) < sizeof(func_type)) {\ - if (unlikely(value != (func_type) (target_type) value)) {\ - func_type zero = 0;\ - if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ - return (target_type) -1;\ - if (is_unsigned && unlikely(value < zero))\ - goto raise_neg_overflow;\ - else\ - goto raise_overflow;\ - }\ - }\ - return (target_type) value;\ - } - -/* CIntToPy */ - static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { - const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; - const int is_unsigned = neg_one > const_zero; - if (is_unsigned) { - if (sizeof(long) < sizeof(long)) { - return PyInt_FromLong((long) value); - } else if (sizeof(long) <= sizeof(unsigned long)) { - return PyLong_FromUnsignedLong((unsigned long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { - return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); -#endif - } - } else { - if (sizeof(long) <= sizeof(long)) { - return PyInt_FromLong((long) value); -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { - return PyLong_FromLongLong((PY_LONG_LONG) value); -#endif - } - } - { - int one = 1; int little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&value; - return _PyLong_FromByteArray(bytes, sizeof(long), - little, !is_unsigned); - } -} - -/* MemviewSliceCopyTemplate */ - static __Pyx_memviewslice -__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, - const char *mode, int ndim, - size_t sizeof_dtype, int contig_flag, - int dtype_is_object) -{ - __Pyx_RefNannyDeclarations - int i; - __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; - struct __pyx_memoryview_obj *from_memview = from_mvs->memview; - Py_buffer *buf = &from_memview->view; - PyObject *shape_tuple = NULL; - PyObject *temp_int = NULL; - struct __pyx_array_obj *array_obj = NULL; - struct __pyx_memoryview_obj *memview_obj = NULL; - __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); - for (i = 0; i < ndim; i++) { - if (unlikely(from_mvs->suboffsets[i] >= 0)) { - PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " - "indirect dimensions (axis %d)", i); - goto fail; - } - } - shape_tuple = PyTuple_New(ndim); - if (unlikely(!shape_tuple)) { - goto fail; - } - __Pyx_GOTREF(shape_tuple); - for(i = 0; i < ndim; i++) { - temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); - if(unlikely(!temp_int)) { - goto fail; - } else { - PyTuple_SET_ITEM(shape_tuple, i, temp_int); - temp_int = NULL; - } - } - 
array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); - if (unlikely(!array_obj)) { - goto fail; - } - __Pyx_GOTREF(array_obj); - memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( - (PyObject *) array_obj, contig_flag, - dtype_is_object, - from_mvs->memview->typeinfo); - if (unlikely(!memview_obj)) - goto fail; - if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) - goto fail; - if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, - dtype_is_object) < 0)) - goto fail; - goto no_fail; -fail: - __Pyx_XDECREF(new_mvs.memview); - new_mvs.memview = NULL; - new_mvs.data = NULL; -no_fail: - __Pyx_XDECREF(shape_tuple); - __Pyx_XDECREF(temp_int); - __Pyx_XDECREF(array_obj); - __Pyx_RefNannyFinishContext(); - return new_mvs; -} - -/* CIntFromPy */ - static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { - const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; - const int is_unsigned = neg_one > const_zero; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if (sizeof(int) < sizeof(long)) { - __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (int) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (int) 0; - case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) - case 2: - if (8 * sizeof(int) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { - return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - case 3: - if (8 * sizeof(int) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { - return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - case 4: - if (8 * sizeof(int) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { - return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); - } - } - break; - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON - if (unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (int) -1; - if (unlikely(result == 1)) - goto raise_neg_overflow; - } -#endif - if (sizeof(int) <= sizeof(unsigned long)) { - __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif - } - 
} else { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (int) 0; - case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) - case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) - case -2: - if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { - return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 2: - if (8 * sizeof(int) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { - return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case -3: - if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { - return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 3: - if (8 * sizeof(int) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { - return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case -4: - if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { - return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - case 4: - if (8 * sizeof(int) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { - return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); - } - } - break; - } -#endif - if (sizeof(int) <= sizeof(long)) { - __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - { -#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) - PyErr_SetString(PyExc_RuntimeError, - "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); -#else - 
int val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); - #if PY_MAJOR_VERSION < 3 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } - #endif - if (likely(v)) { - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - int ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); - Py_DECREF(v); - if (likely(!ret)) - return val; - } -#endif - return (int) -1; - } - } else { - int val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (int) -1; - val = __Pyx_PyInt_As_int(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to int"); - return (int) -1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to int"); - return (int) -1; -} - -/* CIntFromPy */ - static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { - const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; - const int is_unsigned = neg_one > const_zero; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if (sizeof(long) < sizeof(long)) { - __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (long) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (long) 0; - case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) - case 2: - if (8 * sizeof(long) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { - return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - case 3: - if (8 * sizeof(long) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { - return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - case 4: - if (8 * sizeof(long) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { - return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); - } - } - break; - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON - if (unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (long) -1; - if (unlikely(result == 1)) - goto raise_neg_overflow; - } -#endif - if (sizeof(long) <= sizeof(unsigned long)) { - __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= 
sizeof(unsigned PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif - } - } else { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (long) 0; - case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) - case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) - case -2: - if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 2: - if (8 * sizeof(long) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case -3: - if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 3: - if (8 * sizeof(long) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case -4: - if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - case 4: - if (8 * sizeof(long) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { - return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); - } - } - break; - } -#endif - if (sizeof(long) <= sizeof(long)) { - __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - { 
-#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) - PyErr_SetString(PyExc_RuntimeError, - "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); -#else - long val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); - #if PY_MAJOR_VERSION < 3 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } - #endif - if (likely(v)) { - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - int ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); - Py_DECREF(v); - if (likely(!ret)) - return val; - } -#endif - return (long) -1; - } - } else { - long val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (long) -1; - val = __Pyx_PyInt_As_long(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to long"); - return (long) -1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to long"); - return (long) -1; -} - -/* CIntFromPy */ - static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { - const char neg_one = (char) ((char) 0 - (char) 1), const_zero = (char) 0; - const int is_unsigned = neg_one > const_zero; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x))) { - if (sizeof(char) < sizeof(long)) { - __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) - } else { - long val = PyInt_AS_LONG(x); - if (is_unsigned && unlikely(val < 0)) { - goto raise_neg_overflow; - } - return (char) val; - } - } else -#endif - if (likely(PyLong_Check(x))) { - if (is_unsigned) { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (char) 0; - case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) - case 2: - if (8 * sizeof(char) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { - return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); - } - } - break; - case 3: - if (8 * sizeof(char) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { - return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); - } - } - break; - case 4: - if (8 * sizeof(char) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { - return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); - } - } - break; - } -#endif -#if CYTHON_COMPILING_IN_CPYTHON - if (unlikely(Py_SIZE(x) < 0)) { - goto raise_neg_overflow; - } -#else - { - int result = PyObject_RichCompareBool(x, Py_False, Py_LT); - if (unlikely(result < 0)) - return (char) -1; - if (unlikely(result == 1)) - goto 
raise_neg_overflow; - } -#endif - if (sizeof(char) <= sizeof(unsigned long)) { - __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) -#endif - } - } else { -#if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)x)->ob_digit; - switch (Py_SIZE(x)) { - case 0: return (char) 0; - case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) - case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) - case -2: - if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { - return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - case 2: - if (8 * sizeof(char) > 1 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { - return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - case -3: - if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { - return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - case 3: - if (8 * sizeof(char) > 2 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { - return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - case -4: - if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { - return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - case 4: - if (8 * sizeof(char) > 3 * PyLong_SHIFT) { - if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { - __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) - } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { - return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); - } - } - break; - } -#endif - if (sizeof(char) <= sizeof(long)) { - 
__PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) -#ifdef HAVE_LONG_LONG - } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { - __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) -#endif - } - } - { -#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) - PyErr_SetString(PyExc_RuntimeError, - "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); -#else - char val; - PyObject *v = __Pyx_PyNumber_IntOrLong(x); - #if PY_MAJOR_VERSION < 3 - if (likely(v) && !PyLong_Check(v)) { - PyObject *tmp = v; - v = PyNumber_Long(tmp); - Py_DECREF(tmp); - } - #endif - if (likely(v)) { - int one = 1; int is_little = (int)*(unsigned char *)&one; - unsigned char *bytes = (unsigned char *)&val; - int ret = _PyLong_AsByteArray((PyLongObject *)v, - bytes, sizeof(val), - is_little, !is_unsigned); - Py_DECREF(v); - if (likely(!ret)) - return val; - } -#endif - return (char) -1; - } - } else { - char val; - PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); - if (!tmp) return (char) -1; - val = __Pyx_PyInt_As_char(tmp); - Py_DECREF(tmp); - return val; - } -raise_overflow: - PyErr_SetString(PyExc_OverflowError, - "value too large to convert to char"); - return (char) -1; -raise_neg_overflow: - PyErr_SetString(PyExc_OverflowError, - "can't convert negative value to char"); - return (char) -1; -} - -/* CheckBinaryVersion */ - static int __Pyx_check_binary_version(void) { - char ctversion[4], rtversion[4]; - PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); - PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); - if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { - char message[200]; - PyOS_snprintf(message, sizeof(message), - "compiletime version %s of module '%.100s' " - "does not match runtime version %s", - ctversion, __Pyx_MODULE_NAME, rtversion); - return PyErr_WarnEx(NULL, message, 1); - } - return 0; -} - -/* InitStrings */ - static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { - while (t->p) { - #if PY_MAJOR_VERSION < 3 - if (t->is_unicode) { - *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); - } else if (t->intern) { - *t->p = PyString_InternFromString(t->s); - } else { - *t->p = PyString_FromStringAndSize(t->s, t->n - 1); - } - #else - if (t->is_unicode | t->is_str) { - if (t->intern) { - *t->p = PyUnicode_InternFromString(t->s); - } else if (t->encoding) { - *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); - } else { - *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); - } - } else { - *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); - } - #endif - if (!*t->p) - return -1; - if (PyObject_Hash(*t->p) == -1) - return -1; - ++t; - } - return 0; -} - -static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { - return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); -} -static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { - Py_ssize_t ignore; - return __Pyx_PyObject_AsStringAndSize(o, &ignore); -} -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT -#if !CYTHON_PEP393_ENABLED -static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { - char* defenc_c; - PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); - if (!defenc) return NULL; - defenc_c = PyBytes_AS_STRING(defenc); -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - { - char* end = defenc_c + PyBytes_GET_SIZE(defenc); - char* c; - for (c = defenc_c; c < end; c++) { - if ((unsigned char) (*c) >= 128) { - 
PyUnicode_AsASCIIString(o); - return NULL; - } - } - } -#endif - *length = PyBytes_GET_SIZE(defenc); - return defenc_c; -} -#else -static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { - if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - if (likely(PyUnicode_IS_ASCII(o))) { - *length = PyUnicode_GET_LENGTH(o); - return PyUnicode_AsUTF8(o); - } else { - PyUnicode_AsASCIIString(o); - return NULL; - } -#else - return PyUnicode_AsUTF8AndSize(o, length); -#endif -} -#endif -#endif -static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { -#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT - if ( -#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII - __Pyx_sys_getdefaultencoding_not_ascii && -#endif - PyUnicode_Check(o)) { - return __Pyx_PyUnicode_AsStringAndSize(o, length); - } else -#endif -#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) - if (PyByteArray_Check(o)) { - *length = PyByteArray_GET_SIZE(o); - return PyByteArray_AS_STRING(o); - } else -#endif - { - char* result; - int r = PyBytes_AsStringAndSize(o, &result, length); - if (unlikely(r < 0)) { - return NULL; - } else { - return result; - } - } -} -static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { - int is_true = x == Py_True; - if (is_true | (x == Py_False) | (x == Py_None)) return is_true; - else return PyObject_IsTrue(x); -} -static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { - int retval; - if (unlikely(!x)) return -1; - retval = __Pyx_PyObject_IsTrue(x); - Py_DECREF(x); - return retval; -} -static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { -#if PY_MAJOR_VERSION >= 3 - if (PyLong_Check(result)) { - if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, - "__int__ returned non-int (type %.200s). 
" - "The ability to return an instance of a strict subclass of int " - "is deprecated, and may be removed in a future version of Python.", - Py_TYPE(result)->tp_name)) { - Py_DECREF(result); - return NULL; - } - return result; - } -#endif - PyErr_Format(PyExc_TypeError, - "__%.4s__ returned non-%.4s (type %.200s)", - type_name, type_name, Py_TYPE(result)->tp_name); - Py_DECREF(result); - return NULL; -} -static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { -#if CYTHON_USE_TYPE_SLOTS - PyNumberMethods *m; -#endif - const char *name = NULL; - PyObject *res = NULL; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_Check(x) || PyLong_Check(x))) -#else - if (likely(PyLong_Check(x))) -#endif - return __Pyx_NewRef(x); -#if CYTHON_USE_TYPE_SLOTS - m = Py_TYPE(x)->tp_as_number; - #if PY_MAJOR_VERSION < 3 - if (m && m->nb_int) { - name = "int"; - res = m->nb_int(x); - } - else if (m && m->nb_long) { - name = "long"; - res = m->nb_long(x); - } - #else - if (likely(m && m->nb_int)) { - name = "int"; - res = m->nb_int(x); - } - #endif -#else - if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { - res = PyNumber_Int(x); - } -#endif - if (likely(res)) { -#if PY_MAJOR_VERSION < 3 - if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { -#else - if (unlikely(!PyLong_CheckExact(res))) { -#endif - return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); - } - } - else if (!PyErr_Occurred()) { - PyErr_SetString(PyExc_TypeError, - "an integer is required"); - } - return res; -} -static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { - Py_ssize_t ival; - PyObject *x; -#if PY_MAJOR_VERSION < 3 - if (likely(PyInt_CheckExact(b))) { - if (sizeof(Py_ssize_t) >= sizeof(long)) - return PyInt_AS_LONG(b); - else - return PyInt_AsSsize_t(b); - } -#endif - if (likely(PyLong_CheckExact(b))) { - #if CYTHON_USE_PYLONG_INTERNALS - const digit* digits = ((PyLongObject*)b)->ob_digit; - const Py_ssize_t size = Py_SIZE(b); - if (likely(__Pyx_sst_abs(size) <= 1)) { - ival = likely(size) ? digits[0] : 0; - if (size == -1) ival = -ival; - return ival; - } else { - switch (size) { - case 2: - if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { - return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -2: - if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case 3: - if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { - return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -3: - if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case 4: - if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { - return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - case -4: - if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { - return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); - } - break; - } - } - #endif - return PyLong_AsSsize_t(b); - } - x = PyNumber_Index(b); - if (!x) return -1; - ival = PyInt_AsSsize_t(x); - Py_DECREF(x); - return ival; -} -static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { - return b ? 
__Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); -} -static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { - return PyInt_FromSize_t(ival); -} - - -#endif /* Py_PYTHON_H */ diff --git a/nemo/collections/tts/modules/monotonic_align/core.pyx b/nemo/collections/tts/modules/monotonic_align/core.pyx deleted file mode 100644 index bfaabd4d21c2..000000000000 --- a/nemo/collections/tts/modules/monotonic_align/core.pyx +++ /dev/null @@ -1,42 +0,0 @@ -cimport cython -from cython.parallel import prange - - -@cython.boundscheck(False) -@cython.wraparound(False) -cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_y, int t_x, float max_neg_val=-1e9) nogil: - cdef int x - cdef int y - cdef float v_prev - cdef float v_cur - cdef float tmp - cdef int index = t_x - 1 - - for y in range(t_y): - for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): - if x == y: - v_cur = max_neg_val - else: - v_cur = value[y-1, x] - if x == 0: - if y == 0: - v_prev = 0. - else: - v_prev = max_neg_val - else: - v_prev = value[y-1, x-1] - value[y, x] += max(v_prev, v_cur) - - for y in range(t_y - 1, -1, -1): - path[y, index] = 1 - if index != 0 and (index == y or value[y-1, index] < value[y-1, index-1]): - index = index - 1 - - -@cython.boundscheck(False) -@cython.wraparound(False) -cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_ys, int[::1] t_xs) nogil: - cdef int b = paths.shape[0] - cdef int i - for i in prange(b, nogil=True): - maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) diff --git a/nemo/collections/tts/modules/monotonic_align/setup.py b/nemo/collections/tts/modules/monotonic_align/setup.py deleted file mode 100644 index 2410fa8237b9..000000000000 --- a/nemo/collections/tts/modules/monotonic_align/setup.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# MIT License -# -# Copyright (c) 2021 Jaehyeon Kim -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -from distutils.core import setup - -import numpy - -setup( - name='monotonic_align', - # ext_modules=cythonize("core.pyx"), - include_dirs=[numpy.get_include()], -) From c2e16cacdd76b0399f8a1f116a85cd18c07a4553 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 15 Dec 2022 12:27:17 +0000 Subject: [PATCH 230/244] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- nemo/collections/tts/modules/monotonic_align/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nemo/collections/tts/modules/monotonic_align/__init__.py b/nemo/collections/tts/modules/monotonic_align/__init__.py index e3b113ef9ef7..fd2b8aa6ce8f 100644 --- a/nemo/collections/tts/modules/monotonic_align/__init__.py +++ b/nemo/collections/tts/modules/monotonic_align/__init__.py @@ -39,6 +39,7 @@ from .numba_core import maximum_path_c + def maximum_path(neg_cent, mask): """ Cython optimized version. neg_cent: [b, t_t, t_s] From 089fdf67d599695bdbd45e12d90ce217c49e516c Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Thu, 15 Dec 2022 04:34:16 -0800 Subject: [PATCH 231/244] docstring fix Signed-off-by: Evgeniy Shabalin --- nemo/collections/tts/modules/monotonic_align/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nemo/collections/tts/modules/monotonic_align/__init__.py b/nemo/collections/tts/modules/monotonic_align/__init__.py index a17eff1d3712..dc97c13f6175 100644 --- a/nemo/collections/tts/modules/monotonic_align/__init__.py +++ b/nemo/collections/tts/modules/monotonic_align/__init__.py @@ -40,7 +40,7 @@ def maximum_path(neg_cent, mask): - """ Cython optimized version. + """ Numba version. 
        neg_cent: [b, t_t, t_s]
        mask: [b, t_t, t_s]
    """

From 65d78866e7b5610fe4e56b1a06515f6f4e58a87f Mon Sep 17 00:00:00 2001
From: Evgeniy Shabalin
Date: Thu, 22 Dec 2022 04:32:35 -0800
Subject: [PATCH 232/244] Enhancements

Signed-off-by: Evgeniy Shabalin

---
 nemo/collections/tts/helpers/helpers.py       |   2 +-
 nemo/collections/tts/models/vits.py           |  30 ++-
 .../tts/modules/monotonic_align/__init__.py   |  20 +-
 .../tts/modules/monotonic_align/numba_core.py |  18 +-
 nemo/collections/tts/modules/vits_modules.py  | 178 +++++++++---------
 nemo/collections/tts/torch/data.py            |   1 +
 6 files changed, 132 insertions(+), 117 deletions(-)

diff --git a/nemo/collections/tts/helpers/helpers.py b/nemo/collections/tts/helpers/helpers.py
index e6c7e7fc707a..c807076f5ead 100644
--- a/nemo/collections/tts/helpers/helpers.py
+++ b/nemo/collections/tts/helpers/helpers.py
@@ -545,7 +545,7 @@ def slice_segments(x, ids_str, segment_size=4):

 def rand_slice_segments(x, x_lengths=None, segment_size=4):
     """
-    Chooses random indices and lices segments from batch
+    Chooses random indices and slices segments from batch
     """
     b, d, t = x.size()
     if x_lengths is None:
diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py
index 2d2e6091cc93..939f97735fdf 100644
--- a/nemo/collections/tts/models/vits.py
+++ b/nemo/collections/tts/models/vits.py
@@ -31,7 +31,9 @@
 from nemo.collections.tts.modules.vits_modules import MultiPeriodDiscriminator
 from nemo.collections.tts.torch.data import DistributedBucketSampler
 from nemo.collections.tts.torch.tts_data_types import SpeakerID
-from nemo.core.classes.common import PretrainedModelInfo
+from nemo.core.classes.common import PretrainedModelInfo, typecheck
+from nemo.core.neural_types.elements import AudioSignal, FloatType, Index, IntType, TokenIndex
+from nemo.core.neural_types.neural_type import NeuralType
 from nemo.core.optim.lr_scheduler import CosineAnnealing
 from nemo.utils import logging, model_utils
 from nemo.utils.decorators.experimental import experimental
@@ -162,14 +164,24 @@ def configure_optimizers(self):
             return [optim_g, optim_d], [scheduler_g_dict, scheduler_d_dict]
         else:
             return [optim_g, optim_d]
-
+
     # for inference
-    def forward(self, tokens, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1.0, max_len=1000):
+    @typecheck(
+        input_types={
+            "tokens": NeuralType(('B', 'T_text'), TokenIndex()),
+            "speakers": NeuralType(('B',), Index(), optional=True),
+            "noise_scale": NeuralType(('B',), FloatType(), optional=True),
+            "length_scale": NeuralType(('B',), FloatType(), optional=True),
+            "noise_scale_w": NeuralType(('B',), FloatType(), optional=True),
+            "max_len": NeuralType(('B',), IntType(), optional=True),
+        }
+    )
+    def forward(self, tokens, speakers=None, noise_scale=1, length_scale=1, noise_scale_w=1.0, max_len=1000):
         text_len = torch.tensor([tokens.size(-1)]).to(int).to(tokens.device)
         audio_pred, attn, y_mask, (z, z_p, m_p, logs_p) = self.net_g.infer(
             tokens,
             text_len,
-            sid=sid,
+            speakers=speakers,
             noise_scale=noise_scale,
             length_scale=length_scale,
             noise_scale_w=noise_scale_w,
@@ -362,6 +374,10 @@ def list_available_models(cls) -> 'List[PretrainedModelInfo]':
         list_of_models = []
         # TODO: List available models??
         return list_of_models
-
-    def convert_text_to_waveform(self, *, tokens, sid=None):
-        return self(tokens, sid=sid)[0].squeeze(1)
+
+    @typecheck(
+        input_types={"tokens": NeuralType(('B', 'T_text'), TokenIndex()), "speakers": NeuralType(('B',), Index(), optional=True)},
+        output_types={"audio": NeuralType(('B', 'T_audio'), AudioSignal())},
+    )
+    def convert_text_to_waveform(self, *, tokens, speakers=None):
+        return self(tokens=tokens, speakers=speakers)[0].squeeze(1)
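A minimal sketch of how the typechecked inference API above could be driven end to end once this patch is applied. The checkpoint filename and the use of the model's text tokenizer are illustrative assumptions, not part of the patch:

import torch

from nemo.collections.tts.models.vits import VitsModel

# Hypothetical checkpoint path; substitute a real trained .nemo file.
model = VitsModel.restore_from("vits_ljspeech.nemo")
model.eval()

# Tokenize one sentence with the model's tokenizer (assumed API); shape [1, T_text].
tokens = torch.tensor([model.tokenizer("Hello world!")], dtype=torch.long)

with torch.no_grad():
    # @typecheck matches inputs by keyword name, so arguments must be passed as kwargs.
    audio = model.convert_text_to_waveform(tokens=tokens)  # [1, T_audio] waveform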
diff --git a/nemo/collections/tts/modules/monotonic_align/__init__.py b/nemo/collections/tts/modules/monotonic_align/__init__.py
index dc97c13f6175..da36a9eccd7e 100644
--- a/nemo/collections/tts/modules/monotonic_align/__init__.py
+++ b/nemo/collections/tts/modules/monotonic_align/__init__.py
@@ -33,23 +33,5 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.

-import numpy as np
-import torch
-
-from .numba_core import maximum_path_c
-
-
-def maximum_path(neg_cent, mask):
-    """ Numba version.
-        neg_cent: [b, t_t, t_s]
-        mask: [b, t_t, t_s]
-    """
-    device = neg_cent.device
-    dtype = neg_cent.dtype
-    neg_cent = neg_cent.data.cpu().numpy().astype(np.float32)
-    path = np.zeros(neg_cent.shape, dtype=np.int32)
-
-    t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32)
-    t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32)
-    maximum_path_c(path, neg_cent, t_t_max, t_s_max)
-    return torch.from_numpy(path).to(device=device, dtype=dtype)
+from .numba_core import maximum_path
diff --git a/nemo/collections/tts/modules/monotonic_align/numba_core.py b/nemo/collections/tts/modules/monotonic_align/numba_core.py
index 34ffd5fa3e35..f1e22a664d30 100644
--- a/nemo/collections/tts/modules/monotonic_align/numba_core.py
+++ b/nemo/collections/tts/modules/monotonic_align/numba_core.py
@@ -14,7 +14,8 @@

 import numba
-
+import numpy as np
+import torch

 @numba.jit(nopython=True, boundscheck=False, parallel=True)
 def maximum_path_each(path, value, t_y: int, t_x: int, max_neg_val=-1e9):
@@ -63,5 +64,20 @@ def maximum_path_c(paths, values, t_ys, t_xs):
         maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i])


+def maximum_path(neg_cent, mask):
+    """ Numba version.
+        neg_cent: [b, t_t, t_s]
+        mask: [b, t_t, t_s]
+    """
+    device = neg_cent.device
+    dtype = neg_cent.dtype
+    neg_cent = neg_cent.data.cpu().numpy().astype(np.float32)
+    path = np.zeros(neg_cent.shape, dtype=np.int32)
+
+    t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32)
+    t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32)
+    maximum_path_c(path, neg_cent, t_t_max, t_s_max)
+    return torch.from_numpy(path).to(device=device, dtype=dtype)
+
 if __name__ == '__main__':
     pass
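For reference, a small self-contained sketch of calling the relocated maximum_path helper on dummy tensors; the shapes follow the docstring above, and the random values are purely illustrative:

import torch

from nemo.collections.tts.modules.monotonic_align import maximum_path

b, t_t, t_s = 2, 7, 5                # batch, spectrogram frames, text positions
neg_cent = torch.randn(b, t_t, t_s)  # alignment scores (negative cross-entropy)
mask = torch.ones(b, t_t, t_s)       # every position valid in this toy example

path = maximum_path(neg_cent, mask)  # hard monotonic alignment, same shape, 0/1 entries
assert path.shape == (b, t_t, t_s)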
diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py
index 1c1dc38ef210..682faddcb4ac 100644
--- a/nemo/collections/tts/modules/vits_modules.py
+++ b/nemo/collections/tts/modules/vits_modules.py
@@ -114,7 +114,7 @@ def forward(self, x, x_mask):

 class DDSConv(nn.Module):
     """
-    Dialted and Depth-Separable Convolution
+    Dilated and Depth-Separable Convolution
     """

     def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0):
@@ -513,7 +513,7 @@ def __init__(
         self.emb = nn.Embedding(n_vocab, hidden_channels, padding_idx=padding_idx)
         nn.init.normal_(self.emb.weight, 0.0, hidden_channels ** -0.5)

-        self.encoder = Encoder(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout)
+        self.encoder = AttentionEncoder(hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout)
         self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1)

     def forward(self, x, x_lengths):
@@ -845,10 +845,10 @@ def __init__(
         if n_speakers > 1:
             self.emb_g = nn.Embedding(n_speakers, gin_channels)

-    def forward(self, text, text_len, spec, spec_len, sid=None):
+    def forward(self, text, text_len, spec, spec_len, speakers=None):
         x, mean_prior, logscale_prior, text_mask = self.enc_p(text, text_len)
         if self.n_speakers > 1:
-            g = self.emb_g(sid).unsqueeze(-1)  # [b, h, 1]
+            g = self.emb_g(speakers).unsqueeze(-1)  # [b, h, 1]
         else:
             g = None

@@ -900,10 +900,10 @@ def forward(self, text, text_len, spec, spec_len, speakers=None):
             (z, z_p, mean_prior, logscale_prior, mean_posterior, logscale_posterior),
         )

-    def infer(self, text, text_len, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1.0, max_len=None):
+    def infer(self, text, text_len, speakers=None, noise_scale=1, length_scale=1, noise_scale_w=1.0, max_len=None):
         x, mean_prior, logscale_prior, text_mask = self.enc_p(text, text_len)
-        if self.n_speakers > 1 and sid is not None:
-            g = self.emb_g(sid).unsqueeze(-1)  # [b, h, 1]
+        if self.n_speakers > 1 and speakers is not None:
+            g = self.emb_g(speakers).unsqueeze(-1)  # [b, h, 1]
         else:
             g = None

@@ -931,10 +931,10 @@ def infer(self, text, text_len, speakers=None, noise_scale=1, length_scale=1, noise_s
         return audio, attn, audio_mask, (z, z_p, mean_prior, logscale_prior)

     # Can be used for emotions
-    def voice_conversion(self, y, y_lengths, sid_src, sid_tgt):
+    def voice_conversion(self, y, y_lengths, speaker_src, speaker_tgt):
         assert self.n_speakers > 1, "n_speakers have to be larger than 1."
- g_src = self.emb_g(sid_src).unsqueeze(-1) - g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) + g_src = self.emb_g(speaker_src).unsqueeze(-1) + g_tgt = self.emb_g(speaker_tgt).unsqueeze(-1) z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) z_p = self.flow(z, y_mask, g=g_src) z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) @@ -945,7 +945,7 @@ def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): ############## # Attentions # ############## -class Encoder(nn.Module): +class AttentionEncoder(nn.Module): def __init__( self, hidden_channels, @@ -997,84 +997,84 @@ def forward(self, x, x_mask): return x -class Decoder(nn.Module): - def __init__( - self, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size=1, - p_dropout=0.0, - proximal_bias=False, - proximal_init=True, - **kwargs - ): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.ModuleList() - self.norm_layers_0 = nn.ModuleList() - self.encdec_attn_layers = nn.ModuleList() - self.norm_layers_1 = nn.ModuleList() - self.ffn_layers = nn.ModuleList() - self.norm_layers_2 = nn.ModuleList() - for i in range(self.n_layers): - self.self_attn_layers.append( - MultiHeadAttention( - hidden_channels, - hidden_channels, - n_heads, - p_dropout=p_dropout, - proximal_bias=proximal_bias, - proximal_init=proximal_init, - ) - ) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append( - MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout) - ) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True) - ) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = ( - torch.tril(torch.ones(x_mask.size(2), x_mask.size(2))) - .unsqueeze(0) - .unsqueeze(0) - .to(device=x.device, dtype=x.dtype) - ) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x +# class Decoder(nn.Module): +# def __init__( +# self, +# hidden_channels, +# filter_channels, +# n_heads, +# n_layers, +# kernel_size=1, +# p_dropout=0.0, +# proximal_bias=False, +# proximal_init=True, +# **kwargs +# ): +# super().__init__() +# self.hidden_channels = hidden_channels +# self.filter_channels = filter_channels +# self.n_heads = n_heads +# self.n_layers = n_layers +# self.kernel_size = kernel_size +# self.p_dropout = p_dropout +# self.proximal_bias = proximal_bias +# self.proximal_init = proximal_init + +# self.drop = nn.Dropout(p_dropout) +# self.self_attn_layers = nn.ModuleList() +# self.norm_layers_0 = nn.ModuleList() +# self.encdec_attn_layers = nn.ModuleList() +# self.norm_layers_1 = nn.ModuleList() +# self.ffn_layers = nn.ModuleList() +# self.norm_layers_2 = nn.ModuleList() 
+# for i in range(self.n_layers): +# self.self_attn_layers.append( +# MultiHeadAttention( +# hidden_channels, +# hidden_channels, +# n_heads, +# p_dropout=p_dropout, +# proximal_bias=proximal_bias, +# proximal_init=proximal_init, +# ) +# ) +# self.norm_layers_0.append(LayerNorm(hidden_channels)) +# self.encdec_attn_layers.append( +# MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout) +# ) +# self.norm_layers_1.append(LayerNorm(hidden_channels)) +# self.ffn_layers.append( +# FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True) +# ) +# self.norm_layers_2.append(LayerNorm(hidden_channels)) + +# def forward(self, x, x_mask, h, h_mask): +# """ +# x: decoder input +# h: encoder output +# """ +# self_attn_mask = ( +# torch.tril(torch.ones(x_mask.size(2), x_mask.size(2))) +# .unsqueeze(0) +# .unsqueeze(0) +# .to(device=x.device, dtype=x.dtype) +# ) +# encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) +# x = x * x_mask +# for i in range(self.n_layers): +# y = self.self_attn_layers[i](x, x, self_attn_mask) +# y = self.drop(y) +# x = self.norm_layers_0[i](x + y) + +# y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) +# y = self.drop(y) +# x = self.norm_layers_1[i](x + y) + +# y = self.ffn_layers[i](x, x_mask) +# y = self.drop(y) +# x = self.norm_layers_2[i](x + y) +# x = x * x_mask +# return x class MultiHeadAttention(nn.Module): diff --git a/nemo/collections/tts/torch/data.py b/nemo/collections/tts/torch/data.py index e5cf138e3d03..3f4ea2e47be2 100644 --- a/nemo/collections/tts/torch/data.py +++ b/nemo/collections/tts/torch/data.py @@ -242,6 +242,7 @@ def __init__( file_info["text_tokens"] = self.text_tokenizer(file_info["normalized_text"]) data.append(file_info) + # Calculating length of spectrogram from input audio for batch sampling self.lengths.append(os.path.getsize(item["audio_filepath"]) // (n_fft // 2)) if file_info["duration"] is None: From 828e5d894069da732d25ed6ee350680ff3248b32 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Thu, 22 Dec 2022 04:59:27 -0800 Subject: [PATCH 233/244] Enhancements Signed-off-by: Evgeniy Shabalin --- .../Text_(Inverse)_Normalization.ipynb | 934 +++++++++--------- 1 file changed, 467 insertions(+), 467 deletions(-) diff --git a/tutorials/text_processing/Text_(Inverse)_Normalization.ipynb b/tutorials/text_processing/Text_(Inverse)_Normalization.ipynb index bbf4f2decc6b..f8123146f55f 100755 --- a/tutorials/text_processing/Text_(Inverse)_Normalization.ipynb +++ b/tutorials/text_processing/Text_(Inverse)_Normalization.ipynb @@ -1,468 +1,468 @@ { - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "a5fA5qAm5Afg" - }, - "outputs": [], - "source": [ - "\"\"\"\n", - "You can run either this notebook locally or on Google Colab.\n", - "\n", - "Instructions for setting up Colab are as follows:\n", - "1. Open a new Python 3 notebook.\n", - "2. Import this notebook from GitHub (File -> Upload Notebook -> \"GITHUB\" tab -> copy/paste GitHub URL)\n", - "3. 
Optional: Restart the runtime (Runtime -> Restart Runtime) for any upgraded packages to take effect\n", - "\"\"\"" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "> **_NOTE:_** Find the official NeMo documentation at \n", - "https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/nlp/text_normalization/wfst/intro.html " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Overview\n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "F-IrnmXMTevr" - }, - "source": [ - "A sentence can be split up into semiotic tokens stemming from a variety of classes, where the spoken form differs from the written form. Examples are *dates*, *decimals*, *cardinals*, *measures* etc. The good TN or ITN system will be able to handle a variety of **semiotic classes**." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "-IT1Xr9iW2Xr" - }, - "source": [ - "# How to use\n", - "## 1. Installation" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "## Install NeMo, which installs both nemo and nemo_text_processing package\n", - "BRANCH = 'main'\n", - "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]\n", - "\n", - "# install Pynini for text normalization\n", - "! wget https://raw.githubusercontent.com/NVIDIA/NeMo/main/nemo_text_processing/install_pynini.sh\n", - "! bash install_pynini.sh" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# try to import of nemo_text_processing an other dependencies\n", - "import nemo_text_processing\n", - "import os" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 2. Text Normalization" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "Bfs7fa9lXDDh" - }, - "outputs": [], - "source": [ - "# create text normalization instance that works on cased input\n", - "from nemo_text_processing.text_normalization.normalize import Normalizer\n", - "normalizer = Normalizer(input_case='cased', lang='en')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# the normalizer class offers the following parameterization. \n", - "print(normalizer.__doc__)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "> **_NOTE:_** Standard Text Normalization uses `deterministic=True`, outputting a single output for a given input string\n", - "\n", - "\n", - "\n", - "### 2.1 Run TN on input string" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Normalizer.normalize() offers the following parameterization\n", - "print(normalizer.normalize.__doc__)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# run normalization on example string input\n", - "written = \"We paid $123 for this desk.\"\n", - "normalized = normalizer.normalize(written, verbose=True, punct_post_process=True)\n", - "print(normalized)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "intermediate semiotic class information is shown if verbose=True. \n", - "\n", - "Long input text could be split into sentences as follows:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "written = \"Mr. Smith paid $111 in U.S.A. 
on Dec. 17th. We paid $123 for this desk.\"\n", - "\n", - "# split long text into sentences\n", - "sentences = normalizer.split_text_into_sentences(written)\n", - "\n", - "for sent in sentences:\n", - " print(sent)\n", - "\n", - "# normalize each sentence separately using normalize() or all sentences at once with normalize_list()\n", - "normalizer.normalize_list(sentences)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n", - "### 2.2 Run TN on list of input strings" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "UD-OuFmEOX3T" - }, - "outputs": [], - "source": [ - "# create temporary data folder and example input file\n", - "DATA_DIR = 'tmp_data_dir'\n", - "os.makedirs(DATA_DIR, exist_ok=True)\n", - "INPUT_FILE = f'{DATA_DIR}/inference.txt'\n", - "! echo -e 'The alarm went off at 10:00a.m. \\nI received $123' > $INPUT_FILE" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "d4T0gXHwY3JZ" - }, - "outputs": [], - "source": [ - "# check input file was properly created\n", - "! cat $INPUT_FILE" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# load input file into 'data' - a list of strings\n", - "data = []\n", - "with open(INPUT_FILE, 'r') as fp:\n", - " for line in fp:\n", - " data.append(line.strip())\n", - "data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "F5wSJTI8ZFRg" - }, - "outputs": [], - "source": [ - "# run normalization on 'data'\n", - "normalizer.normalize_list(data, punct_post_process=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "RMT5lkPYzZHK" - }, - "source": [ - "### 2.3 Evaluate TN on written-normalized text pairs \n", - "\n", - "The evaluation data needs to have the following format:\n", - "\n", - "'on 22 july 2022 they worked until 12:00' and the normalization is represented as " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# example evaluation sentence\n", - "eval_text = \"\"\"PLAIN\\ton\\t\n", - "DATE\\t22 july 2012\\tthe twenty second of july twenty twelve\n", - "PLAIN\\tthey\\t\n", - "PLAIN\\tworked\\t\n", - "PLAIN\\tuntil\\t\n", - "TIME\\t12:00\\ttwelve o'clock\n", - "\\t\n", - "\"\"\"\n", - "EVAL_FILE = f'{DATA_DIR}/eval.txt'\n", - "with open(EVAL_FILE, 'w') as fp:\n", - " fp.write(eval_text)\n", - "! cat $EVAL_FILE" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "RMT5lkPYzZHK" - }, - "source": [ - "That is, every sentence is broken into semiotic tokens line by line and concluded by end of sentence token ``. In case of a plain token it's `[SEMIOTIC CLASS] [TAB] [WRITTEN] [TAB] `, otherwise `[SEMIOTIC CLASS] [TAB] [WRITTEN] [TAB] [NORMALIZED]`.\n", - "This format was introduced in [Google Text normalization dataset](https://arxiv.org/abs/1611.00068). 
" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Parse evaluation file into written and normalized sentence pairs\n", - "from nemo_text_processing.text_normalization.data_loader_utils import load_files, training_data_to_sentences\n", - "eval_data = load_files([EVAL_FILE])\n", - "sentences_un_normalized, sentences_normalized, sentences_class_types = training_data_to_sentences(eval_data)\n", - "print(list(zip(sentences_un_normalized, sentences_normalized)))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# run prediction\n", - "sentences_prediction = normalizer.normalize_list(sentences_un_normalized)\n", - "print(sentences_prediction)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# measure sentence accuracy\n", - "from nemo_text_processing.text_normalization.data_loader_utils import evaluate\n", - "sentences_accuracy = evaluate(\n", - " preds=sentences_prediction, labels=sentences_normalized, input=sentences_un_normalized\n", - " )\n", - "print(\"- Accuracy: \" + str(sentences_accuracy))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 3. Inverse Text Normalization\n", - "ITN supports equivalent API as TN. Here we are only going to show inverse normalization on input string" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# create inverse text normalization instance\n", - "from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer\n", - "inverse_normalizer = InverseNormalizer(lang='en')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# run ITN on example string input\n", - "spoken = \"we paid one hundred twenty three dollars for this desk\"\n", - "un_normalized = inverse_normalizer.inverse_normalize(spoken, verbose=True)\n", - "print(un_normalized)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 4. Audio-based Text Normalization\n", - "Audio-based text normalization uses extended [WFST](https://en.wikipedia.org/wiki/Finite-state_machine) grammars to provide a range of possible normalization options.\n", - "The following example shows the workflow: (Disclaimer: exact values in graphic do not need to be real system's behavior)\n", - "1. text \"627\" is sent to extended TN WFST grammar\n", - "2. grammar output 5 different options of verbalization based on text input alone\n", - "3. in case an audio file is presented we compare the audio transcript with the verbalization options to find out which normalization is correct based on character error rate. The transcript is generated using a pretrained NeMo ASR model. 
\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The following shows an example of how to generate multiple normalization options:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# import non-deterministic WFST-based TN module\n", - "from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# initialize normalizer, this may take some time to generate the extended grammars. \n", - "# Thus, we recommend to cache the grammars by specifying a cache directory\n", - "normalizer = NormalizerWithAudio(\n", - " lang=\"en\",\n", - " input_case=\"cased\",\n", - " overwrite_cache=False,\n", - " cache_dir=\"cache_dir\",\n", - " )\n", - "# create up to 10 normalization options\n", - "print(normalizer.normalize(\"123\", n_tagged=10, punct_post_process=True))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 5. Parallel execution\n", - "\n", - "`Normalizer.normalize()` as well as `InverseNormalizer.inverse_normalize()` are functions without side effect.\n", - "Thus, if you need to normalize large amounts of input examples, these can be executed in parallel." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ENMDNl9C4TkF" - }, - "source": [ - "# Tutorial on how to customize grammars\n", - "\n", - "https://colab.research.google.com/github/NVIDIA/NeMo/blob/stable/tutorials/text_processing/WFST_Tutorial.ipynb\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "lcvT3P2lQ_GS" - }, - "source": [ - "# References and Further Reading:\n", - "\n", - "\n", - "- [Zhang, Yang, Bakhturina, Evelina, Gorman, Kyle and Ginsburg, Boris. \"NeMo Inverse Text Normalization: From Development To Production.\" (2021)](https://arxiv.org/abs/2104.05055)\n", - "- [Ebden, Peter, and Richard Sproat. \"The Kestrel TTS text normalization system.\" Natural Language Engineering 21.3 (2015): 333.](https://www.cambridge.org/core/journals/natural-language-engineering/article/abs/kestrel-tts-text-normalization-system/F0C18A3F596B75D83B75C479E23795DA)\n", - "- [Gorman, Kyle. \"Pynini: A Python library for weighted finite-state grammar compilation.\" Proceedings of the SIGFSM Workshop on Statistical NLP and Weighted Automata. 2016.](https://www.aclweb.org/anthology/W16-2409.pdf)\n", - "- [Mohri, Mehryar, Fernando Pereira, and Michael Riley. 
\"Weighted finite-state transducers in speech recognition.\" Computer Speech & Language 16.1 (2002): 69-88.](https://cs.nyu.edu/~mohri/postscript/csl01.pdf)"
-   ]
-  }
- ],
- "metadata": {
-  "accelerator": "GPU",
-  "colab": {
-   "collapsed_sections": [
-    "lcvT3P2lQ_GS"
-   ],
-   "name": "Text_Normalization_Tutorial.ipynb",
-   "private_outputs": true,
-   "provenance": [],
-   "toc_visible": true
-  },
-  "kernelspec": {
-   "display_name": "Python 3 (ipykernel)",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.9.7"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 1
-}
\ No newline at end of file
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "a5fA5qAm5Afg"
+   },
+   "outputs": [],
+   "source": [
+    "\"\"\"\n",
+    "You can run this notebook either locally or on Google Colab.\n",
+    "\n",
+    "Instructions for setting up Colab are as follows:\n",
+    "1. Open a new Python 3 notebook.\n",
+    "2. Import this notebook from GitHub (File -> Upload Notebook -> \"GITHUB\" tab -> copy/paste GitHub URL)\n",
+    "3. Optional: Restart the runtime (Runtime -> Restart Runtime) for any upgraded packages to take effect\n",
+    "\"\"\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "> **_NOTE:_** Find the official NeMo documentation at \n",
+    "https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/stable/nlp/text_normalization/wfst/intro.html "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Overview\n",
+    ""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "F-IrnmXMTevr"
+   },
+   "source": [
+    "A sentence can be split up into semiotic tokens stemming from a variety of classes, where the spoken form differs from the written form. Examples are *dates*, *decimals*, *cardinals*, *measures*, etc. A good TN or ITN system will be able to handle a variety of **semiotic classes**."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "-IT1Xr9iW2Xr"
+   },
+   "source": [
+    "# How to use\n",
+    "## 1. Installation"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "## Install NeMo, which installs both the nemo and nemo_text_processing packages\n",
+    "BRANCH = 'main'\n",
+    "!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]\n",
+    "\n",
+    "# install Pynini for text normalization\n",
+    "! wget https://raw.githubusercontent.com/NVIDIA/NeMo/main/nemo_text_processing/install_pynini.sh\n",
+    "! bash install_pynini.sh"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# try to import nemo_text_processing and other dependencies\n",
+    "import nemo_text_processing\n",
+    "import os"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 2. Text Normalization"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "Bfs7fa9lXDDh"
+   },
+   "outputs": [],
+   "source": [
+    "# create a text normalization instance that works on cased input\n",
+    "from nemo_text_processing.text_normalization.normalize import Normalizer\n",
+    "normalizer = Normalizer(input_case='cased', lang='en')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# the Normalizer class offers the following parameterization\n",
+    "print(normalizer.__doc__)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "> **_NOTE:_** Standard Text Normalization uses `deterministic=True`, producing a single output for a given input string\n",
+    "\n",
+    "\n",
+    "\n",
+    "### 2.1 Run TN on an input string"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Normalizer.normalize() offers the following parameterization\n",
+    "print(normalizer.normalize.__doc__)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# run normalization on an example string input\n",
+    "written = \"We paid $123 for this desk.\"\n",
+    "normalized = normalizer.normalize(written, verbose=True, punct_post_process=True)\n",
+    "print(normalized)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Intermediate semiotic class information is shown if verbose=True.\n",
+    "\n",
+    "Long input text can be split into sentences as follows:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "written = \"Mr. Smith paid $111 in U.S.A. on Dec. 17th. We paid $123 for this desk.\"\n",
+    "\n",
+    "# split long text into sentences\n",
+    "sentences = normalizer.split_text_into_sentences(written)\n",
+    "\n",
+    "for sent in sentences:\n",
+    "    print(sent)\n",
+    "\n",
+    "# normalize each sentence separately using normalize() or all sentences at once with normalize_list()\n",
+    "normalizer.normalize_list(sentences)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "\n",
+    "### 2.2 Run TN on a list of input strings"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "UD-OuFmEOX3T"
+   },
+   "outputs": [],
+   "source": [
+    "# create a temporary data folder and an example input file\n",
+    "DATA_DIR = 'tmp_data_dir'\n",
+    "os.makedirs(DATA_DIR, exist_ok=True)\n",
+    "INPUT_FILE = f'{DATA_DIR}/inference.txt'\n",
+    "! echo -e 'The alarm went off at 10:00a.m. \\nI received $123' > $INPUT_FILE"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "d4T0gXHwY3JZ"
+   },
+   "outputs": [],
+   "source": [
+    "# check that the input file was properly created\n",
+    "! cat $INPUT_FILE"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# load the input file into 'data' - a list of strings\n",
+    "data = []\n",
+    "with open(INPUT_FILE, 'r') as fp:\n",
+    "    for line in fp:\n",
+    "        data.append(line.strip())\n",
+    "data"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "F5wSJTI8ZFRg"
+   },
+   "outputs": [],
+   "source": [
+    "# run normalization on 'data'\n",
+    "normalizer.normalize_list(data, punct_post_process=True)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "RMT5lkPYzZHK"
+   },
+   "source": [
+    "### 2.3 Evaluate TN on written-normalized text pairs\n",
+    "\n",
+    "The evaluation data needs to have the following format:\n",
+    "\n",
+    "e.g. the written sentence 'on 22 july 2012 they worked until 12:00' and its normalization are represented as "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# example evaluation sentence\n",
+    "eval_text = \"\"\"PLAIN\\ton\\t\n",
+    "DATE\\t22 july 2012\\tthe twenty second of july twenty twelve\n",
+    "PLAIN\\tthey\\t\n",
+    "PLAIN\\tworked\\t\n",
+    "PLAIN\\tuntil\\t\n",
+    "TIME\\t12:00\\ttwelve o'clock\n",
+    "\\t\n",
+    "\"\"\"\n",
+    "EVAL_FILE = f'{DATA_DIR}/eval.txt'\n",
+    "with open(EVAL_FILE, 'w') as fp:\n",
+    "    fp.write(eval_text)\n",
+    "! cat $EVAL_FILE"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "RMT5lkPYzZHK"
+   },
+   "source": [
+    "That is, every sentence is broken into semiotic tokens line by line and concluded by an end-of-sentence token ``. In case of a plain token it's `[SEMIOTIC CLASS] [TAB] [WRITTEN] [TAB] `, otherwise `[SEMIOTIC CLASS] [TAB] [WRITTEN] [TAB] [NORMALIZED]`.\n",
+    "This format was introduced in the [Google Text normalization dataset](https://arxiv.org/abs/1611.00068). "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# parse the evaluation file into written and normalized sentence pairs\n",
+    "from nemo_text_processing.text_normalization.data_loader_utils import load_files, training_data_to_sentences\n",
+    "eval_data = load_files([EVAL_FILE])\n",
+    "sentences_un_normalized, sentences_normalized, sentences_class_types = training_data_to_sentences(eval_data)\n",
+    "print(list(zip(sentences_un_normalized, sentences_normalized)))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# run prediction\n",
+    "sentences_prediction = normalizer.normalize_list(sentences_un_normalized)\n",
+    "print(sentences_prediction)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# measure sentence accuracy\n",
+    "from nemo_text_processing.text_normalization.data_loader_utils import evaluate\n",
+    "sentences_accuracy = evaluate(\n",
+    "    preds=sentences_prediction, labels=sentences_normalized, input=sentences_un_normalized\n",
+    ")\n",
+    "print(\"- Accuracy: \" + str(sentences_accuracy))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 3. Inverse Text Normalization\n",
+    "ITN supports an API equivalent to TN's. Here we only show inverse normalization on an input string"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# create an inverse text normalization instance\n",
+    "from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer\n",
+    "inverse_normalizer = InverseNormalizer(lang='en')"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# run ITN on an example string input\n",
+    "spoken = \"we paid one hundred twenty three dollars for this desk\"\n",
+    "un_normalized = inverse_normalizer.inverse_normalize(spoken, verbose=True)\n",
+    "print(un_normalized)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 4. Audio-based Text Normalization\n",
+    "Audio-based text normalization uses extended [WFST](https://en.wikipedia.org/wiki/Finite-state_machine) grammars to provide a range of possible normalization options.\n",
+    "The following example shows the workflow (disclaimer: exact values in the graphic may not reflect the real system's behavior):\n",
+    "1. the text \"627\" is sent to the extended TN WFST grammar\n",
+    "2. the grammar outputs 5 different verbalization options based on the text input alone\n",
+    "3. if an audio file is provided, we compare the audio transcript with the verbalization options to find out which normalization is correct based on character error rate. The transcript is generated using a pretrained NeMo ASR model. \n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    ""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The following shows an example of how to generate multiple normalization options:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# import the non-deterministic WFST-based TN module\n",
+    "from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# initialize the normalizer; this may take some time to generate the extended grammars,\n",
+    "# so we recommend caching the grammars by specifying a cache directory\n",
+    "normalizer = NormalizerWithAudio(\n",
+    "    lang=\"en\",\n",
+    "    input_case=\"cased\",\n",
+    "    overwrite_cache=False,\n",
+    "    cache_dir=\"cache_dir\",\n",
+    ")\n",
+    "# create up to 10 normalization options\n",
+    "print(normalizer.normalize(\"123\", n_tagged=10, punct_post_process=True))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 5. Parallel execution\n",
+    "\n",
+    "`Normalizer.normalize()` as well as `InverseNormalizer.inverse_normalize()` are functions without side effects.\n",
+    "Thus, if you need to normalize large numbers of input examples, these calls can be executed in parallel."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "ENMDNl9C4TkF"
+   },
+   "source": [
+    "# Tutorial on how to customize grammars\n",
+    "\n",
+    "https://colab.research.google.com/github/NVIDIA/NeMo/blob/stable/tutorials/text_processing/WFST_Tutorial.ipynb\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "lcvT3P2lQ_GS"
+   },
+   "source": [
+    "# References and Further Reading:\n",
+    "\n",
+    "\n",
+    "- [Zhang, Yang, Bakhturina, Evelina, Gorman, Kyle and Ginsburg, Boris. 
\"NeMo Inverse Text Normalization: From Development To Production.\" (2021)](https://arxiv.org/abs/2104.05055)\n", + "- [Ebden, Peter, and Richard Sproat. \"The Kestrel TTS text normalization system.\" Natural Language Engineering 21.3 (2015): 333.](https://www.cambridge.org/core/journals/natural-language-engineering/article/abs/kestrel-tts-text-normalization-system/F0C18A3F596B75D83B75C479E23795DA)\n", + "- [Gorman, Kyle. \"Pynini: A Python library for weighted finite-state grammar compilation.\" Proceedings of the SIGFSM Workshop on Statistical NLP and Weighted Automata. 2016.](https://www.aclweb.org/anthology/W16-2409.pdf)\n", + "- [Mohri, Mehryar, Fernando Pereira, and Michael Riley. \"Weighted finite-state transducers in speech recognition.\" Computer Speech & Language 16.1 (2002): 69-88.](https://cs.nyu.edu/~mohri/postscript/csl01.pdf)" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [ + "lcvT3P2lQ_GS" + ], + "name": "Text_Normalization_Tutorial.ipynb", + "private_outputs": true, + "provenance": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.7" + } + }, + "nbformat": 4, + "nbformat_minor": 1 +} \ No newline at end of file From 736666d0d1bcd97511de431152205727405f3f35 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 22 Dec 2022 13:00:47 +0000 Subject: [PATCH 234/244] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- nemo/collections/tts/models/vits.py | 4 ++-- nemo/collections/tts/modules/monotonic_align/numba_core.py | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py index 939f97735fdf..3f8b3614bc2c 100644 --- a/nemo/collections/tts/models/vits.py +++ b/nemo/collections/tts/models/vits.py @@ -164,7 +164,7 @@ def configure_optimizers(self): return [optim_g, optim_d], [scheduler_g_dict, scheduler_d_dict] else: return [optim_g, optim_d] - + # for inference @typecheck( input_types={ @@ -374,7 +374,7 @@ def list_available_models(cls) -> 'List[PretrainedModelInfo]': list_of_models = [] # TODO: List available models?? 
return list_of_models - + @typecheck( input_types={"text_tokens": NeuralType(('B', 'T_text'), TokenIndex())}, output_types={"audio": NeuralType(('B', 'T_audio'), AudioSignal())}, diff --git a/nemo/collections/tts/modules/monotonic_align/numba_core.py b/nemo/collections/tts/modules/monotonic_align/numba_core.py index f1e22a664d30..20c98daab543 100644 --- a/nemo/collections/tts/modules/monotonic_align/numba_core.py +++ b/nemo/collections/tts/modules/monotonic_align/numba_core.py @@ -17,6 +17,7 @@ import numpy as np import torch + @numba.jit(nopython=True, boundscheck=False, parallel=True) def maximum_path_each(path, value, t_y: int, t_x: int, max_neg_val=-1e9): """ @@ -79,5 +80,6 @@ def maximum_path(neg_cent, mask): maximum_path_c(path, neg_cent, t_t_max, t_s_max) return torch.from_numpy(path).to(device=device, dtype=dtype) + if __name__ == '__main__': pass From 7355a7f6c8ceb30c6f37054a1a99833bb46f3ef6 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Thu, 22 Dec 2022 05:03:08 -0800 Subject: [PATCH 235/244] imports fix Signed-off-by: Evgeniy Shabalin --- .../tts/modules/monotonic_align/__init__.py | 2 +- .../tts/modules/monotonic_align/numba_core.py | 31 ++++++++++--------- 2 files changed, 17 insertions(+), 16 deletions(-) diff --git a/nemo/collections/tts/modules/monotonic_align/__init__.py b/nemo/collections/tts/modules/monotonic_align/__init__.py index da36a9eccd7e..126827305be1 100644 --- a/nemo/collections/tts/modules/monotonic_align/__init__.py +++ b/nemo/collections/tts/modules/monotonic_align/__init__.py @@ -34,4 +34,4 @@ # SOFTWARE. -from .numba_core import maximum_path +from .numba_core import maximum_path \ No newline at end of file diff --git a/nemo/collections/tts/modules/monotonic_align/numba_core.py b/nemo/collections/tts/modules/monotonic_align/numba_core.py index f1e22a664d30..db4870ecf6ae 100644 --- a/nemo/collections/tts/modules/monotonic_align/numba_core.py +++ b/nemo/collections/tts/modules/monotonic_align/numba_core.py @@ -17,6 +17,22 @@ import numpy as np import torch +def maximum_path(neg_cent, mask): + """ Numba version. + neg_cent: [b, t_t, t_s] + mask: [b, t_t, t_s] + """ + device = neg_cent.device + dtype = neg_cent.dtype + neg_cent = neg_cent.data.cpu().numpy().astype(np.float32) + path = np.zeros(neg_cent.shape, dtype=np.int32) + + t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32) + t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32) + maximum_path_c(path, neg_cent, t_t_max, t_s_max) + return torch.from_numpy(path).to(device=device, dtype=dtype) + + @numba.jit(nopython=True, boundscheck=False, parallel=True) def maximum_path_each(path, value, t_y: int, t_x: int, max_neg_val=-1e9): """ @@ -64,20 +80,5 @@ def maximum_path_c(paths, values, t_ys, t_xs): maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) -def maximum_path(neg_cent, mask): - """ Numba version. 
- neg_cent: [b, t_t, t_s] - mask: [b, t_t, t_s] - """ - device = neg_cent.device - dtype = neg_cent.dtype - neg_cent = neg_cent.data.cpu().numpy().astype(np.float32) - path = np.zeros(neg_cent.shape, dtype=np.int32) - - t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32) - t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32) - maximum_path_c(path, neg_cent, t_t_max, t_s_max) - return torch.from_numpy(path).to(device=device, dtype=dtype) - if __name__ == '__main__': pass From 6d4a3db123a48f2ae7315f1b16e52f59a2522e79 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 22 Dec 2022 13:14:19 +0000 Subject: [PATCH 236/244] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- nemo/collections/tts/modules/monotonic_align/__init__.py | 2 +- nemo/collections/tts/modules/monotonic_align/numba_core.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/nemo/collections/tts/modules/monotonic_align/__init__.py b/nemo/collections/tts/modules/monotonic_align/__init__.py index 126827305be1..da36a9eccd7e 100644 --- a/nemo/collections/tts/modules/monotonic_align/__init__.py +++ b/nemo/collections/tts/modules/monotonic_align/__init__.py @@ -34,4 +34,4 @@ # SOFTWARE. -from .numba_core import maximum_path \ No newline at end of file +from .numba_core import maximum_path diff --git a/nemo/collections/tts/modules/monotonic_align/numba_core.py b/nemo/collections/tts/modules/monotonic_align/numba_core.py index 3e85ea6bfa46..72dcb7c7065a 100644 --- a/nemo/collections/tts/modules/monotonic_align/numba_core.py +++ b/nemo/collections/tts/modules/monotonic_align/numba_core.py @@ -17,6 +17,7 @@ import numpy as np import torch + def maximum_path(neg_cent, mask): """ Numba version. neg_cent: [b, t_t, t_s] From fec3f9939bba539a0f1282bc4a48b536ee643a6a Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Thu, 22 Dec 2022 05:20:09 -0800 Subject: [PATCH 237/244] fix typo Signed-off-by: Evgeniy Shabalin --- .../tts/modules/monotonic_align/numba_core.py | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/nemo/collections/tts/modules/monotonic_align/numba_core.py b/nemo/collections/tts/modules/monotonic_align/numba_core.py index 3e85ea6bfa46..db4870ecf6ae 100644 --- a/nemo/collections/tts/modules/monotonic_align/numba_core.py +++ b/nemo/collections/tts/modules/monotonic_align/numba_core.py @@ -80,24 +80,5 @@ def maximum_path_c(paths, values, t_ys, t_xs): maximum_path_each(paths[i], values[i], t_ys[i], t_xs[i]) -<<<<<<< HEAD -======= -def maximum_path(neg_cent, mask): - """ Numba version. 
-    neg_cent: [b, t_t, t_s]
-    mask: [b, t_t, t_s]
-    """
-    device = neg_cent.device
-    dtype = neg_cent.dtype
-    neg_cent = neg_cent.data.cpu().numpy().astype(np.float32)
-    path = np.zeros(neg_cent.shape, dtype=np.int32)
-
-    t_t_max = mask.sum(1)[:, 0].data.cpu().numpy().astype(np.int32)
-    t_s_max = mask.sum(2)[:, 0].data.cpu().numpy().astype(np.int32)
-    maximum_path_c(path, neg_cent, t_t_max, t_s_max)
-    return torch.from_numpy(path).to(device=device, dtype=dtype)
-
-
->>>>>>> 736666d0d1bcd97511de431152205727405f3f35
 if __name__ == '__main__':
     pass

From 725f24f720c4a6b22845e09a34dd2af539ee0414 Mon Sep 17 00:00:00 2001
From: Evgeniy Shabalin
Date: Thu, 22 Dec 2022 05:46:17 -0800
Subject: [PATCH 238/244] excessive computations fix

Signed-off-by: Evgeniy Shabalin

---
 nemo/collections/tts/torch/data.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nemo/collections/tts/torch/data.py b/nemo/collections/tts/torch/data.py
index 3f4ea2e47be2..4b393a9f7c27 100644
--- a/nemo/collections/tts/torch/data.py
+++ b/nemo/collections/tts/torch/data.py
@@ -1053,9 +1053,9 @@ def _create_buckets(self):
             self.boundaries.pop(i + 1)
 
         num_samples_per_bucket = []
+        total_batch_size = self.num_replicas * self.batch_size
         for i in range(len(buckets)):
             len_bucket = len(buckets[i])
-            total_batch_size = self.num_replicas * self.batch_size
             rem = (total_batch_size - (len_bucket % total_batch_size)) % total_batch_size
             num_samples_per_bucket.append(len_bucket + rem)
         return buckets, num_samples_per_bucket

From 4763c7bd50780af57f72de6b6d3814809b34608d Mon Sep 17 00:00:00 2001
From: Evgeniy Shabalin
Date: Fri, 23 Dec 2022 06:39:21 -0800
Subject: [PATCH 239/244] typecheck fix

Signed-off-by: Evgeniy Shabalin

---
 nemo/collections/tts/models/vits.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py
index 3f8b3614bc2c..7e848dd92c14 100644
--- a/nemo/collections/tts/models/vits.py
+++ b/nemo/collections/tts/models/vits.py
@@ -168,8 +168,8 @@ def configure_optimizers(self):
     # for inference
     @typecheck(
         input_types={
-            "text": NeuralType(('B', 'T_text'), TokenIndex()),
-            "speaker": NeuralType(('B',), Index(), optional=True),
+            "tokens": NeuralType(('B', 'T_text'), TokenIndex()),
+            "speakers": NeuralType(('B',), Index(), optional=True),
             "noise_scale": NeuralType(('B',), FloatType(), optional=True),
             "length_scale": NeuralType(('B',), FloatType(), optional=True),
             "noise_scale_w": NeuralType(('B',), FloatType(), optional=True),
@@ -376,8 +376,9 @@ def list_available_models(cls) -> 'List[PretrainedModelInfo]':
         return list_of_models

     @typecheck(
-        input_types={"text_tokens": NeuralType(('B', 'T_text'), TokenIndex())},
+        input_types={"tokens": NeuralType(('B', 'T_text'), TokenIndex(), optional=True),},
         output_types={"audio": NeuralType(('B', 'T_audio'), AudioSignal())},
     )
     def convert_text_to_waveform(self, *, tokens, speakers=None):
-        return self(tokens, speakers=speakers)[0].squeeze(1)
+        audio = self(tokens=tokens, speakers=speakers)[0].squeeze(1)
+        return audio

From d6deedb8bdbd9fb93fc92174740a24f5b3ad6bee Mon Sep 17 00:00:00 2001
From: Evgeniy Shabalin
Date: Mon, 9 Jan 2023 11:30:55 -0800
Subject: [PATCH 240/244] Small refactoring

---
 nemo/collections/tts/helpers/helpers.py      |  2 +-
 nemo/collections/tts/models/vits.py          |  4 +-
 nemo/collections/tts/modules/vits_modules.py | 80 --------------------
 3 files changed, 3 insertions(+), 83 deletions(-)

diff --git a/nemo/collections/tts/helpers/helpers.py
b/nemo/collections/tts/helpers/helpers.py
index e220a54bf1f9..f8763bffeb7f 100644
--- a/nemo/collections/tts/helpers/helpers.py
+++ b/nemo/collections/tts/helpers/helpers.py
@@ -547,7 +547,7 @@ def split_view(tensor, split_size: int, dim: int = 0):
 
 def slice_segments(x, ids_str, segment_size=4):
     """
-    Slices segments from batch
+    Time-wise slicing (patching) of batches for audio/spectrogram
     """
     ret = torch.zeros_like(x[:, :, :segment_size])
     for i in range(x.size(0)):
diff --git a/nemo/collections/tts/models/vits.py b/nemo/collections/tts/models/vits.py
index 7e848dd92c14..d035c6a1b3ac 100644
--- a/nemo/collections/tts/models/vits.py
+++ b/nemo/collections/tts/models/vits.py
@@ -259,9 +259,9 @@ def training_step(self, batch, batch_idx):
         metrics = {
             "loss_gen": loss_gen,
             "loss_fm": loss_fm,
-            "loss_mel * c_mel": loss_mel,
+            "loss_mel": loss_mel,
             "loss_dur": loss_dur,
-            "loss_kl * c_kl": loss_kl,
+            "loss_kl": loss_kl,
             "loss_gen_all": loss_gen_all,
             "loss_disc_all": loss_disc_all,
             "grad_gen": norm_g,
diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py
index 682faddcb4ac..74b944c53c26 100644
--- a/nemo/collections/tts/modules/vits_modules.py
+++ b/nemo/collections/tts/modules/vits_modules.py
@@ -997,86 +997,6 @@ def forward(self, x, x_mask):
         return x
 
 
-# class Decoder(nn.Module):
-#     def __init__(
-#         self,
-#         hidden_channels,
-#         filter_channels,
-#         n_heads,
-#         n_layers,
-#         kernel_size=1,
-#         p_dropout=0.0,
-#         proximal_bias=False,
-#         proximal_init=True,
-#         **kwargs
-#     ):
-#         super().__init__()
-#         self.hidden_channels = hidden_channels
-#         self.filter_channels = filter_channels
-#         self.n_heads = n_heads
-#         self.n_layers = n_layers
-#         self.kernel_size = kernel_size
-#         self.p_dropout = p_dropout
-#         self.proximal_bias = proximal_bias
-#         self.proximal_init = proximal_init
-
-#         self.drop = nn.Dropout(p_dropout)
-#         self.self_attn_layers = nn.ModuleList()
-#         self.norm_layers_0 = nn.ModuleList()
-#         self.encdec_attn_layers = nn.ModuleList()
-#         self.norm_layers_1 = nn.ModuleList()
-#         self.ffn_layers = nn.ModuleList()
-#         self.norm_layers_2 = nn.ModuleList()
-#         for i in range(self.n_layers):
-#             self.self_attn_layers.append(
-#                 MultiHeadAttention(
-#                     hidden_channels,
-#                     hidden_channels,
-#                     n_heads,
-#                     p_dropout=p_dropout,
-#                     proximal_bias=proximal_bias,
-#                     proximal_init=proximal_init,
-#                 )
-#             )
-#             self.norm_layers_0.append(LayerNorm(hidden_channels))
-#             self.encdec_attn_layers.append(
-#                 MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)
-#             )
-#             self.norm_layers_1.append(LayerNorm(hidden_channels))
-#             self.ffn_layers.append(
-#                 FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)
-#             )
-#             self.norm_layers_2.append(LayerNorm(hidden_channels))

-#     def forward(self, x, x_mask, h, h_mask):
-#         """
-#         x: decoder input
-#         h: encoder output
-#         """
-#         self_attn_mask = (
-#             torch.tril(torch.ones(x_mask.size(2), x_mask.size(2)))
-#             .unsqueeze(0)
-#             .unsqueeze(0)
-#             .to(device=x.device, dtype=x.dtype)
-#         )
-#         encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
-#         x = x * x_mask
-#         for i in range(self.n_layers):
-#             y = self.self_attn_layers[i](x, x, self_attn_mask)
-#             y = self.drop(y)
-#             x = self.norm_layers_0[i](x + y)
-
-#             y = self.encdec_attn_layers[i](x, h, encdec_attn_mask)
-#             y = self.drop(y)
-#             x = self.norm_layers_1[i](x + y)
-
-#             y = self.ffn_layers[i](x, x_mask)
-#             y = self.drop(y)
-#             x = self.norm_layers_2[i](x + y)
-#         x = x * x_mask
-#         return x
-
-
 class MultiHeadAttention(nn.Module):
     def __init__(
         self,

From d52e6d561e52c8f43b5a898a886afb1e39be694b Mon Sep 17 00:00:00 2001
From: Evgeniy Shabalin
Date: Fri, 13 Jan 2023 10:43:03 -0800
Subject: [PATCH 241/244] Small refactoring

Signed-off-by: Evgeniy Shabalin

---
 examples/tts/conf/vits_44100.yaml       | 16 ++++++++++------
 nemo/collections/tts/helpers/helpers.py |  2 ++
 2 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/examples/tts/conf/vits_44100.yaml b/examples/tts/conf/vits_44100.yaml
index 5eb3c98cff9a..7c5461486adf 100644
--- a/examples/tts/conf/vits_44100.yaml
+++ b/examples/tts/conf/vits_44100.yaml
@@ -2,8 +2,6 @@
 # If you want to train model on other dataset, you can change config values according to your dataset.
 # Most dataset-specific arguments are in the head of the config file, see below.
 
-# TODO: remove unnecessary arguments, refactoring
-
 name: VITS
 
 train_dataset: ???
@@ -98,7 +96,7 @@ model:
       pin_memory: false
 
     batch_sampler:
-      batch_size: 2
+      batch_size: 32
       boundaries: [32,300,400,500,600,700,800,900,1000]
       num_replicas: ${trainer.devices}
       shuffle: true
@@ -127,7 +125,7 @@ model:
     dataloader_params:
       drop_last: false
      shuffle: false
-      batch_size: 2
+      batch_size: 32
      num_workers: 4
      pin_memory: false
@@ -202,12 +200,18 @@ trainer:
   check_val_every_n_epoch: 1
 
 exp_manager:
-  exp_dir: ???
+  exp_dir: ../exps/vits_hifitts
   name: ${name}
-  create_tensorboard_logger: true
+  create_tensorboard_logger: false
   create_checkpoint_callback: true
   checkpoint_callback_params:
     monitor: loss_gen_all
     mode: min
+  create_wandb_logger: true
+  wandb_logger_kwargs:
+    name: vits_fp16_local
+    project: ${name}
+    entity: nvidia
+    resume: "allow"
   resume_if_exists: false
   resume_ignore_no_checkpoint: false
diff --git a/nemo/collections/tts/helpers/helpers.py b/nemo/collections/tts/helpers/helpers.py
index f8763bffeb7f..0b165d1bd88e 100644
--- a/nemo/collections/tts/helpers/helpers.py
+++ b/nemo/collections/tts/helpers/helpers.py
@@ -548,6 +548,7 @@ def split_view(tensor, split_size: int, dim: int = 0):
 def slice_segments(x, ids_str, segment_size=4):
     """
     Time-wise slicing (patching) of batches for audio/spectrogram
+    [B x C x T] -> [B x C x segment_size]
     """
     ret = torch.zeros_like(x[:, :, :segment_size])
     for i in range(x.size(0)):
@@ -564,6 +565,7 @@ def slice_segments(x, ids_str, segment_size=4):
 def rand_slice_segments(x, x_lengths=None, segment_size=4):
     """
     Chooses random indices and slices segments from batch
+    [B x C x T] -> [B x C x segment_size]
     """
     b, d, t = x.size()
     if x_lengths is None:

From ab868d91e23cef4d897a96dda9f4cf1436a2bcef Mon Sep 17 00:00:00 2001
From: Evgeniy Shabalin
Date: Fri, 13 Jan 2023 10:49:19 -0800
Subject: [PATCH 242/244] reversed exp_manager params

Signed-off-by: Evgeniy Shabalin

---
 examples/tts/conf/vits_44100.yaml | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/examples/tts/conf/vits_44100.yaml b/examples/tts/conf/vits_44100.yaml
index 7c5461486adf..c9955e70abce 100644
--- a/examples/tts/conf/vits_44100.yaml
+++ b/examples/tts/conf/vits_44100.yaml
@@ -200,18 +200,12 @@ trainer:
   check_val_every_n_epoch: 1
 
 exp_manager:
-  exp_dir: ../exps/vits_hifitts
+  exp_dir: ???
name: ${name} - create_tensorboard_logger: false + create_tensorboard_logger: true create_checkpoint_callback: true checkpoint_callback_params: monitor: loss_gen_all mode: min - create_wandb_logger: true - wandb_logger_kwargs: - name: vits_fp16_local - project: ${name} - entity: nvidia - resume: "allow" resume_if_exists: false resume_ignore_no_checkpoint: false From 120a73670f00c1d79803f4bef235c4c0c16fc639 Mon Sep 17 00:00:00 2001 From: Evgeniy Shabalin Date: Wed, 18 Jan 2023 12:54:56 -0800 Subject: [PATCH 243/244] Fixed call for new function signature Signed-off-by: Evgeniy Shabalin --- nemo/collections/tts/helpers/helpers.py | 2 +- nemo/collections/tts/modules/vits_modules.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/nemo/collections/tts/helpers/helpers.py b/nemo/collections/tts/helpers/helpers.py index dce20237cc36..a2ee904d0536 100644 --- a/nemo/collections/tts/helpers/helpers.py +++ b/nemo/collections/tts/helpers/helpers.py @@ -636,7 +636,7 @@ def generate_path(duration, mask): cum_duration = torch.cumsum(duration, -1) cum_duration_flat = cum_duration.view(b * t_x) - path = get_mask_from_lengths(cum_duration_flat, t_y).to(mask.dtype) + path = get_mask_from_lengths(cum_duration_flat, torch.Tensor(t_y).reshape(1, 1, -1)).to(mask.dtype) path = path.view(b, t_x, t_y) path = path - torch.nn.functional.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] path = path.unsqueeze(1).transpose(2, 3) * mask diff --git a/nemo/collections/tts/modules/vits_modules.py b/nemo/collections/tts/modules/vits_modules.py index 74b944c53c26..1793f1f10565 100644 --- a/nemo/collections/tts/modules/vits_modules.py +++ b/nemo/collections/tts/modules/vits_modules.py @@ -519,7 +519,7 @@ def __init__( def forward(self, x, x_lengths): x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(get_mask_from_lengths(x_lengths, x.size(2)), 1).to(x.dtype) + x_mask = torch.unsqueeze(get_mask_from_lengths(x_lengths, x), 1).to(x.dtype) x = self.encoder(x * x_mask, x_mask) stats = self.proj(x) * x_mask @@ -582,7 +582,7 @@ def __init__( self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(get_mask_from_lengths(x_lengths, x.size(2)), 1).to(x.dtype).to(device=x.device) + x_mask = torch.unsqueeze(get_mask_from_lengths(x_lengths, x), 1).to(x.dtype).to(device=x.device) x = self.pre(x) * x_mask x = self.enc(x, x_mask, g=g) stats = self.proj(x) * x_mask From e6547cb7d3ac4968f4f49aa8d7bc2f873d1a3fc8 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 19 Jan 2023 13:38:13 +0000 Subject: [PATCH 244/244] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- nemo/collections/tts/torch/data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nemo/collections/tts/torch/data.py b/nemo/collections/tts/torch/data.py index 4729ae2f20be..113826af8cef 100644 --- a/nemo/collections/tts/torch/data.py +++ b/nemo/collections/tts/torch/data.py @@ -1542,4 +1542,4 @@ def set_epoch(self, epoch: int) -> None: Args: epoch (int): Epoch number. """ - self.epoch = epoch \ No newline at end of file + self.epoch = epoch
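
Usage sketch for the inference entry point settled by PATCH 239 above: `convert_text_to_waveform(self, *, tokens, speakers=None)`, which takes `tokens` typed as ('B', 'T_text') TokenIndex and returns `audio` typed as ('B', 'T_audio'). This is a minimal sketch, not part of the series itself: the class name `VitsModel` and the `restore_from` loader are assumed from standard NeMo ModelPT conventions, the checkpoint filename "vits.nemo" is a placeholder, and the random token IDs stand in for real tokenizer output.

    import torch

    from nemo.collections.tts.models.vits import VitsModel  # module path used throughout this series; class name assumed

    # Load a trained checkpoint ("vits.nemo" is a placeholder path).
    model = VitsModel.restore_from("vits.nemo")
    model.eval()

    # Placeholder token IDs shaped [B, T_text]; real inputs come from the model's text tokenizer.
    tokens = torch.randint(low=1, high=100, size=(1, 50), dtype=torch.long)

    # Keyword-only call matching the signature fixed in PATCH 239; returns audio shaped [B, T_audio].
    with torch.no_grad():
        audio = model.convert_text_to_waveform(tokens=tokens)

    print(audio.shape)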